Showing preview only (614K chars total). Download the full file or copy to clipboard to get everything.
Repository: yahoo/HaloDB
Branch: master
Commit: 767d2357a4c9
Files: 117
Total size: 575.4 KB
Directory structure:
gitextract_fajmuc1o/
├── .github/
│ └── workflows/
│ ├── maven-publish.yml
│ └── maven.yml
├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── CONTRIBUTORS.md
├── Code-of-Conduct.md
├── LICENSE
├── NOTICE
├── README.md
├── benchmarks/
│ ├── README.md
│ ├── pom.xml
│ └── src/
│ └── main/
│ └── java/
│ └── com/
│ └── oath/
│ └── halodb/
│ └── benchmarks/
│ ├── BenchmarkTool.java
│ ├── Benchmarks.java
│ ├── HaloDBStorageEngine.java
│ ├── KyotoStorageEngine.java
│ ├── RandomDataGenerator.java
│ ├── RocksDBStorageEngine.java
│ └── StorageEngine.java
├── docs/
│ ├── WhyHaloDB.md
│ └── benchmarks.md
├── pom.xml
└── src/
├── main/
│ └── java/
│ └── com/
│ └── oath/
│ └── halodb/
│ ├── CompactionManager.java
│ ├── Constants.java
│ ├── DBDirectory.java
│ ├── DBMetaData.java
│ ├── FileUtils.java
│ ├── HaloDB.java
│ ├── HaloDBException.java
│ ├── HaloDBFile.java
│ ├── HaloDBInternal.java
│ ├── HaloDBIterator.java
│ ├── HaloDBKeyIterator.java
│ ├── HaloDBOptions.java
│ ├── HaloDBStats.java
│ ├── HashAlgorithm.java
│ ├── HashTableUtil.java
│ ├── HashTableValueSerializer.java
│ ├── Hasher.java
│ ├── InMemoryIndex.java
│ ├── InMemoryIndexMetaData.java
│ ├── InMemoryIndexMetaDataSerializer.java
│ ├── IndexFile.java
│ ├── IndexFileEntry.java
│ ├── JNANativeAllocator.java
│ ├── KeyBuffer.java
│ ├── LongArrayList.java
│ ├── MemoryPoolAddress.java
│ ├── MemoryPoolChunk.java
│ ├── MemoryPoolHashEntries.java
│ ├── NativeMemoryAllocator.java
│ ├── NonMemoryPoolHashEntries.java
│ ├── OffHeapHashTable.java
│ ├── OffHeapHashTableBuilder.java
│ ├── OffHeapHashTableImpl.java
│ ├── OffHeapHashTableStats.java
│ ├── Record.java
│ ├── RecordKey.java
│ ├── Segment.java
│ ├── SegmentNonMemoryPool.java
│ ├── SegmentStats.java
│ ├── SegmentWithMemoryPool.java
│ ├── TombstoneEntry.java
│ ├── TombstoneFile.java
│ ├── Uns.java
│ ├── UnsExt.java
│ ├── UnsExt8.java
│ ├── UnsafeAllocator.java
│ ├── Utils.java
│ ├── Versions.java
│ └── histo/
│ └── EstimatedHistogram.java
└── test/
├── java/
│ └── com/
│ └── oath/
│ └── halodb/
│ ├── CheckOffHeapHashTable.java
│ ├── CheckSegment.java
│ ├── CompactionWithErrorsTest.java
│ ├── CrossCheckTest.java
│ ├── DBDirectoryTest.java
│ ├── DBMetaDataTest.java
│ ├── DBRepairTest.java
│ ├── DataConsistencyDB.java
│ ├── DataConsistencyTest.java
│ ├── DoubleCheckOffHeapHashTableImpl.java
│ ├── FileUtilsTest.java
│ ├── HaloDBCompactionTest.java
│ ├── HaloDBDeletionTest.java
│ ├── HaloDBFileCompactionTest.java
│ ├── HaloDBFileTest.java
│ ├── HaloDBIteratorTest.java
│ ├── HaloDBKeyIteratorTest.java
│ ├── HaloDBOptionsTest.java
│ ├── HaloDBStatsTest.java
│ ├── HaloDBTest.java
│ ├── HashTableTestUtils.java
│ ├── HashTableUtilTest.java
│ ├── HashTableValueSerializerTest.java
│ ├── HasherTest.java
│ ├── IndexFileEntryTest.java
│ ├── KeyBufferTest.java
│ ├── LinkedImplTest.java
│ ├── LongArrayListTest.java
│ ├── MemoryPoolChunkTest.java
│ ├── NonMemoryPoolHashEntriesTest.java
│ ├── OffHeapHashTableBuilderTest.java
│ ├── RandomDataGenerator.java
│ ├── RecordTest.java
│ ├── RehashTest.java
│ ├── SegmentWithMemoryPoolTest.java
│ ├── SequenceNumberTest.java
│ ├── SyncWriteTest.java
│ ├── TestBase.java
│ ├── TestListener.java
│ ├── TestUtils.java
│ ├── TombstoneFileCleanUpTest.java
│ ├── TombstoneFileTest.java
│ ├── UnsTest.java
│ └── histo/
│ └── EstimatedHistogramTest.java
└── resources/
└── log4j2-test.xml
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/maven-publish.yml
================================================
# This workflow will build a package using Maven and then publish it to GitHub packages when a release is created
# For more information see: https://github.com/actions/setup-java/blob/main/docs/advanced-usage.md#apache-maven-with-a-settings-path
name: Maven Package
on:
release:
types: [created]
jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v2
- name: Set up JDK 8
uses: actions/setup-java@v2
with:
java-version: '8'
distribution: 'adopt'
server-id: github # Value of the distributionManagement/repository/id field of the pom.xml
settings-path: ${{ github.workspace }} # location for the settings.xml file
- name: Publish to GitHub Packages Apache Maven
run: mvn deploy -s $GITHUB_WORKSPACE/settings.xml
env:
GITHUB_TOKEN: ${{ github.token }}
================================================
FILE: .github/workflows/maven.yml
================================================
# This workflow will build a Java project with Maven
# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
name: Java CI with Maven
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up JDK 8
uses: actions/setup-java@v2
with:
java-version: '8'
distribution: 'adopt'
- name: Build with Maven
run: mvn -B package --file pom.xml
================================================
FILE: .gitignore
================================================
target
.idea
halodb.iml
tmp/
================================================
FILE: .travis.yml
================================================
language: java
dist: trusty
jdk:
- oraclejdk8
================================================
FILE: CHANGELOG.md
================================================
# HaloDB Change Log
## 0.4.3 (08/20/2018)
* Sequence number, instead of relying on system time, is now a number incremented for each write operation.
* Include compaction rate in stats.
## 0.4.2 (08/06/2018)
* Handle the case where db crashes while it is being repaired due to error from a previous crash.
* _put_ operation in _HaloDB_ now returns a boolean value indicating the status of the operation.
## 0.4.1 (7/16/2018)
* Include version, checksum and max file size in META file.
* _maxFileSize_ in _HaloDBOptions_ now accepts only int values.
## 0.4.0 (7/11/2018)
* Implemented memory pool for in-memory index.
================================================
FILE: CONTRIBUTING.md
================================================
# How to contribute
First, thanks for taking the time to contribute to our project! The following information provides a guide for making contributions.
## Code of Conduct
By participating in this project, you agree to abide by the [Oath Code of Conduct](Code-of-Conduct.md). Everyone is welcome to submit a pull request or open an issue to improve the documentation, add improvements, or report bugs.
## How to Ask a Question
If you simply have a question that needs an answer, [create an issue](https://help.github.com/articles/creating-an-issue/), and label it as a question.
## How To Contribute
### Report a Bug or Request a Feature
If you encounter any bugs while using this software, or want to request a new feature or enhancement, feel free to [create an issue](https://help.github.com/articles/creating-an-issue/) to report it, make sure you add a label to indicate what type of issue it is.
### Contribute Code
Pull requests are welcome for bug fixes. If you want to implement something new, please [request a feature first](#report-a-bug-or-request-a-feature) so we can discuss it.
#### Creating a Pull Request
Please follow [best practices](https://github.com/trein/dev-best-practices/wiki/Git-Commit-Best-Practices) for creating git commits.
When your code is ready to be submitted, you can [submit a pull request](https://help.github.com/articles/creating-a-pull-request/) to begin the code review process.
================================================
FILE: CONTRIBUTORS.md
================================================
HaloDB was designed and implemented by [Arjun Mannaly](https://github.com/amannaly)
================================================
FILE: Code-of-Conduct.md
================================================
# Oath Open Source Code of Conduct
## Summary
This Code of Conduct is our way to encourage good behavior and discourage bad behavior in our open source community. We invite participation from many people to bring different perspectives to support this project. We pledge to do our part to foster a welcoming and professional environment free of harassment. We expect participants to communicate professionally and thoughtfully during their involvement with this project.
Participants may lose their good standing by engaging in misconduct. For example: insulting, threatening, or conveying unwelcome sexual content. We ask participants who observe conduct issues to report the incident directly to the project's Response Team at opensource-conduct@oath.com. Oath will assign a respondent to address the issue. We may remove harassers from this project.
This code does not replace the terms of service or acceptable use policies of the websites used to support this project. We acknowledge that participants may be subject to additional conduct terms based on their employment which may govern their online expressions.
## Details
This Code of Conduct makes our expectations of participants in this community explicit.
* We forbid harassment and abusive speech within this community.
* We request participants to report misconduct to the project’s Response Team.
* We urge participants to refrain from using discussion forums to play out a fight.
### Expected Behaviors
We expect participants in this community to conduct themselves professionally. Since our primary mode of communication is text on an online forum (e.g. issues, pull requests, comments, emails, or chats) devoid of vocal tone, gestures, or other context that is often vital to understanding, it is important that participants are attentive to their interaction style.
* **Assume positive intent.** We ask community members to assume positive intent on the part of other people’s communications. We may disagree on details, but we expect all suggestions to be supportive of the community goals.
* **Respect participants.** We expect participants will occasionally disagree. Even if we reject an idea, we welcome everyone’s participation. Open Source projects are learning experiences. Ask, explore, challenge, and then respectfully assert if you agree or disagree. If your idea is rejected, be more persuasive not bitter.
* **Welcoming to new members.** New members bring new perspectives. Some may raise questions that have been addressed before. Kindly point them to existing discussions. Everyone is new to every project once.
* **Be kind to beginners.** Beginners use open source projects to get experience. They might not be talented coders yet, and projects should not accept poor quality code. But we were all beginners once, and we need to engage kindly.
* **Consider your impact on others.** Your work will be used by others, and you depend on the work of others. We expect community members to be considerate and establish a balance between their self-interest and the communal interest.
* **Use words carefully.** We may not understand intent when you say something ironic. Poe’s Law suggests that without an emoticon people will misinterpret sarcasm. We ask community members to communicate plainly.
* **Leave with class.** When you wish to resign from participating in this project for any reason, you are free to fork the code and create a competitive project. Open Source explicitly allows this. Your exit should not be dramatic or bitter.
### Unacceptable Behaviors
Participants remain in good standing when they do not engage in misconduct or harassment. To elaborate:
* **Don't be a bigot.** Calling out project members by their identity or background in a negative or insulting manner. This includes, but is not limited to, slurs or insinuations related to protected or suspect classes e.g. race, color, citizenship, national origin, political belief, religion, sexual orientation, gender identity and expression, age, size, culture, ethnicity, genetic features, language, profession, national minority status, mental or physical ability.
* **Don't insult.** Insulting remarks about a person’s lifestyle practices.
* **Don't dox.** Revealing private information about other participants without explicit permission.
* **Don't intimidate.** Threats of violence or intimidation of any project member.
* **Don't creep.** Unwanted sexual attention or content unsuited for the subject of this project.
* **Don't disrupt.** Sustained disruptions in a discussion.
* **Let us help.** Refusal to assist the Response Team to resolve an issue in the community.
We do not list all forms of harassment, nor imply some forms of harassment are not worthy of action. Any participant who *feels* harassed or *observes* harassment, should report the incident. Victims of harassment should not address grievances in the public forum, as this often intensifies the problem. Report it, and let us address it off-line.
### Reporting Issues
If you experience or witness misconduct, or have any other concerns about the conduct of members of this project, please report it by contacting our Response Team at opensource-conduct@oath.com who will handle your report with discretion. Your report should include:
* Your preferred contact information. We cannot process anonymous reports.
* Names (real or usernames) of those involved in the incident.
* Your account of what occurred, and if the incident is ongoing. Please provide links to or transcripts of the publicly available records (e.g. a mailing list archive or a public IRC logger), so that we can review it.
* Any additional information that may be helpful to achieve resolution.
After filing a report, a representative will contact you directly to review the incident and ask additional questions. If a member of the Oath Response Team is named in an incident report, that member will be recused from handling your incident. If the complaint originates from a member of the Response Team, it will be addressed by a different member of the Response Team. We will consider reports to be confidential for the purpose of protecting victims of abuse.
### Scope
Oath will assign a Response Team member with admin rights on the project and legal rights on the project copyright. The Response Team is empowered to restrict some privileges to the project as needed. Since this project is governed by an open source license, any participant may fork the code under the terms of the project license. The Response Team’s goal is to preserve the project if possible, and will restrict or remove participation from those who disrupt the project.
This code does not replace the terms of service or acceptable use policies that are provided by the websites used to support this community. Nor does this code apply to communications or actions that take place outside of the context of this community. Many participants in this project are also subject to codes of conduct based on their employment. This code is a social-contract that informs participants of our social expectations. It is not a terms of service or legal contract.
## License and Acknowledgment.
This text is shared under the [CC-BY-4.0 license](https://creativecommons.org/licenses/by/4.0/). This code is based on a study conducted by the [TODO Group](https://todogroup.org/) of many codes used in the open source community. If you have feedback about this code, contact our Response Team at the address listed above.
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of this License; and
You must cause any modified files to carry prominent notices stating that You changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
================================================
FILE: NOTICE
================================================
=========================================================================
NOTICE file for use with, and corresponding to Section 4 of,
the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
in this case for the HaloDB project
=========================================================================
This project contains software developed by Robert Stupp.
OHC (https://github.com/snazy/ohc)
Java Off-Heap-Cache, licensed under APLv2
Copyright (C) 2014 Robert Stupp, Koeln, Germany, robert-stupp.de
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# HaloDB
[](https://travis-ci.org/yahoo/HaloDB)
[ ](https://bintray.com/yahoo/maven/halodb/_latestVersion)
HaloDB is a fast and simple embedded key-value store written in Java. HaloDB is suitable for IO bound workloads, and is capable of handling high throughput reads and writes at submillisecond latencies.
HaloDB was written for a high-throughput, low latency distributed key-value database that powers multiple ad platforms at Yahoo, therefore all its design choices and optimizations were
primarily for this use case.
Basic design principles employed in HaloDB are not new. Refer to this [document](docs/WhyHaloDB.md) for more details about the motivation for HaloDB and its inspirations.
HaloDB comprises two main components: an index in memory which stores all the keys, and append-only log files on
the persistent layer which stores all the data. To reduce Java garbage collection pressure the index
is allocated in native memory, outside the Java heap.

### Basic Operations.
```java
// Open a db with default options.
HaloDBOptions options = new HaloDBOptions();
// Size of each data file will be 1GB.
options.setMaxFileSize(1024 * 1024 * 1024);
// Size of each tombstone file will be 64MB
// A larger file size means fewer files but will slow down db open time. If the
// file size is set too small, it will result in a large number of tombstone files under the db folder
options.setMaxTombstoneFileSize(64 * 1024 * 1024);
// Set the number of threads used to scan index and tombstone files in parallel
// to build in-memory index during db open. It must be a positive number which is
// not greater than Runtime.getRuntime().availableProcessors().
// It is used to speed up db open time.
options.setBuildIndexThreads(8);
// The threshold at which page cache is synced to disk.
// data will be durable only if it is flushed to disk, therefore
// more data will be lost if this value is set too high. Setting
// this value too low might interfere with read and write performance.
options.setFlushDataSizeBytes(10 * 1024 * 1024);
// The percentage of stale data in a data file at which the file will be compacted.
// This value helps control write and space amplification. Increasing this value will
// reduce write amplification but will increase space amplification.
// This along with the compactionJobRate below is the most important setting
// for tuning HaloDB performance. If this is set to x then write amplification
// will be approximately 1/x.
options.setCompactionThresholdPerFile(0.7);
// Controls how fast the compaction job should run.
// This is the amount of data which will be copied by the compaction thread per second.
// Optimal value depends on the compactionThresholdPerFile option.
options.setCompactionJobRate(50 * 1024 * 1024);
// Setting this value is important as it helps to preallocate enough
// memory for the off-heap cache. If the value is too low the db might
// need to rehash the cache. For a db of size n set this value to 2*n.
options.setNumberOfRecords(100_000_000);
// Delete operation for a key will write a tombstone record to a tombstone file.
// the tombstone record can be removed only when all previous version of that key
// has been deleted by the compaction job.
// enabling this option will delete during startup all tombstone records whose previous
// versions were removed from the data file.
options.setCleanUpTombstonesDuringOpen(true);
// HaloDB does native memory allocation for the in-memory index.
// Enabling this option will release all allocated memory back to the kernel when the db is closed.
// This option is not necessary if the JVM is shutdown when the db is closed, as in that case
// allocated memory is released automatically by the kernel.
// If using in-memory index without memory pool this option,
// depending on the number of records in the database,
// could be slow as we need to call _free_ for each record.
options.setCleanUpInMemoryIndexOnClose(false);
// ** settings for memory pool **
options.setUseMemoryPool(true);
// Hash table implementation in HaloDB is similar to that of ConcurrentHashMap in Java 7.
// Hash table is divided into segments and each segment manages its own native memory.
// The number of segments is twice the number of cores in the machine.
// A segment's memory is further divided into chunks whose size can be configured here.
options.setMemoryPoolChunkSize(2 * 1024 * 1024);
// using a memory pool requires us to declare the size of keys in advance.
// Any write request with key length greater than the declared value will fail, but it
// is still possible to store keys smaller than this declared size.
options.setFixedKeySize(8);
// Represents a database instance and provides all methods for operating on the database.
HaloDB db = null;
// The directory will be created if it doesn't exist and all database files will be stored in this directory
String directory = "directory";
// Open the database. Directory will be created if it doesn't exist.
// If we are opening an existing database HaloDB needs to scan all the
// index files to create the in-memory index, which, depending on the db size, might take a few minutes.
db = HaloDB.open(directory, options);
// key and values are byte arrays. Key size is restricted to 128 bytes.
byte[] key1 = Ints.toByteArray(200);
byte[] value1 = "Value for key 1".getBytes();
byte[] key2 = Ints.toByteArray(300);
byte[] value2 = "Value for key 2".getBytes();
// add the key-value pair to the database.
db.put(key1, value1);
db.put(key2, value2);
// read the value from the database.
value1 = db.get(key1);
value2 = db.get(key2);
// delete a key from the database.
db.delete(key1);
// Open an iterator and iterate through all the key-value records.
HaloDBIterator iterator = db.newIterator();
while (iterator.hasNext()) {
Record record = iterator.next();
System.out.println(Ints.fromByteArray(record.getKey()));
System.out.println(new String(record.getValue()));
}
// get stats and print it.
HaloDBStats stats = db.stats();
System.out.println(stats.toString());
// reset stats
db.resetStats();
// pause background compaction thread.
// if a file is being compacted the thread
// will block until the compaction is complete.
db.pauseCompaction();
// resume background compaction thread.
db.resumeCompaction();
// repeatedly calling pause/resume compaction methods will have no effect.
// Close the database.
db.close();
```
Binaries for HaloDB are hosted on [Bintray](https://bintray.com/yahoo).
``` xml
<dependency>
<groupId>com.oath.halodb</groupId>
<artifactId>halodb</artifactId>
<version>x.y.z</version>
</dependency>
<repository>
<id>yahoo-bintray</id>
<name>yahoo-bintray</name>
<url>https://yahoo.bintray.com/maven</url>
</repository>
```
### Read, Write and Space amplification.
Read amplification in HaloDB is always 1—for a read request it needs to do at most one disk lookup—hence it is well suited for
read latency critical workloads. HaloDB provides a configuration which can be tuned to control write amplification
and space amplification, both of which trade-off with each other; HaloDB has a background compaction thread which removes stale data
from the DB. The percentage of stale data at which a file is compacted can be controlled. Increasing this value will increase space amplification
but will reduce write amplification. For example, if the value is set to 50%, then write amplification will be approximately 2.
### Durability and Crash recovery.
Write Ahead Logs (WAL) are usually used by databases for crash recovery. Since for HaloDB WAL _is the_ database crash recovery
is easier and faster.
HaloDB does not flush writes to disk immediately, but, for performance reasons, writes only to the OS page cache. The cache is synced to
disk once a configurable size is reached. In the event of a power loss, the data not flushed to disk will be lost. This compromise
between performance and durability is a necessary one.
In the event of a power loss and data corruption, HaloDB will scan and discard corrupted records. Since the write thread and compaction
thread could be writing to at most two files at a time only those files need to be repaired and hence recovery times are very short.
In the event of a power loss HaloDB offers the following consistency guarantees:
* Writes are atomic.
* Inserts and updates are committed to disk in the same order they are received.
* When inserts/updates and deletes are interleaved total ordering is not guaranteed, but partial ordering is guaranteed for inserts/updates and deletes.
### In-memory index.
HaloDB stores all keys and their associated metadata in an index in memory. The size of this index, depending on the
number and length of keys, can be quite big. Therefore, storing this in the Java Heap is a non-starter for a
performance critical storage engine. HaloDB solves this problem by storing the index in native memory,
outside the heap. There are two variants of the index; one with a memory pool and the other
without it. Using the memory pool helps to reduce the memory footprint of the index and reduce
fragmentation, but requires fixed size keys. A billion 8 byte keys
currently takes around 44GB of memory with memory pool and around 64GB without memory pool.
The size of the keys when using a memory pool should be declared in advance, and although this imposes an
upper limit on the size of the keys it is still possible to store keys smaller than this declared size.
Without the memory pool, HaloDB needs to allocate native memory for every write request. Therefore,
memory fragmentation could be an issue. Using [jemalloc](http://jemalloc.net/) is highly recommended as it
provides a significant reduction in the cache's memory footprint and fragmentation.
### Delete operations.
Delete operation for a key will add a tombstone record to a tombstone file, which is distinct from the data files.
This design has the advantage that the tombstone record once written need not be copied again during compaction, but
the drawback is that in case of a power loss HaloDB cannot guarantee total ordering when put and delete operations are
interleaved (although partial ordering for both is guaranteed).
### DB open time
Opening the db could take a few minutes, depending on the number of records and tombstones. If the db open time is critical to your
use case, keep the tombstone file size relatively small and increase the number of threads used for building the index.
See the option setting section in example code above. As best practice, set tombstone file size at 64MB and set build
index threads to number of available processors divided by number of dbs being opened simultaneously.
### System requirements.
* HaloDB requires Java 8 to run, but has not yet been tested with newer Java versions.
* HaloDB has been tested on Linux running on x86 and on MacOS. It may run on other platforms, but this hasn't been verified yet.
* For performance disable Transparent Huge Pages and swapping (vm.swappiness=0).
* If a thread is interrupted JVM will close those file channels the thread was operating on.
Therefore, don't interrupt threads while they are doing IO operations.
### Restrictions.
* Size of keys is restricted to 128 bytes.
* HaloDB doesn't support range scans or ordered access.
# Benchmarks.
[Benchmarks](docs/benchmarks.md).
# Contributing
Contributions are most welcome. Please refer to the [CONTRIBUTING](https://github.com/yahoo/HaloDB/blob/master/CONTRIBUTING.md) guide
# Credits
HaloDB was written by [Arjun Mannaly](https://github.com/amannaly).
# License
HaloDB is released under the Apache License, Version 2.0
================================================
FILE: benchmarks/README.md
================================================
# Storage Engine Benchmark Tool.
Build the package using **mvn clean package**. This will create a fat jar *target/storage-engine-benchmark-1.0.jar*.
Different benchmarks can be run using:
`java -jar storage-engine-benchmark-1.0.jar <db directory> <benchmark type>`
Different benchmark types are defined [here](https://github.com/yahoo/HaloDB/blob/master/benchmarks/src/main/java/com/oath/halodb/benchmarks/Benchmarks.java).
================================================
FILE: benchmarks/pom.xml
================================================
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>mannaly</groupId>
<artifactId>storage-engine-benchmark</artifactId>
<version>1.0</version>
<packaging>jar</packaging>
<name>storage-engine-benchmark</name>
<url>http://maven.apache.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
<version>5.7.2</version>
</dependency>
<dependency>
<groupId>com.oath.halodb</groupId>
<artifactId>halodb</artifactId>
<version>0.4.2</version>
</dependency>
<dependency>
<groupId>com.fallabs</groupId>
<artifactId>kyotocabinet-java</artifactId>
<version>1.16</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>19.0</version>
</dependency>
<dependency>
<groupId>org.hdrhistogram</groupId>
<artifactId>HdrHistogram</artifactId>
<version>2.1.9</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.8.0-alpha2</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.5.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<!-- assembly plugin to create a fat jar. -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.3</version>
<executions>
<!-- Run shade goal on package phase -->
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<transformers>
<!-- add Main-Class to manifest file -->
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>com.oath.halodb.benchmarks.BenchmarkTool</mainClass>
</transformer>
</transformers>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<repositories>
<repository>
<id>halodb-bintray</id>
<name>halodb-bintray</name>
<url>https://yahoo.bintray.com/maven</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
</repositories>
</project>
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/BenchmarkTool.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
import com.google.common.primitives.Longs;
import com.google.common.util.concurrent.RateLimiter;
import org.HdrHistogram.Histogram;
import java.io.File;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.Random;
import java.util.concurrent.TimeUnit;
/**
 * Entry point for the storage-engine benchmarks. Usage:
 * {@code java -jar <jar> <db directory> <benchmark type>} where the benchmark
 * type is one of the {@link Benchmarks} constants.
 */
public class BenchmarkTool {

    // Total number of records in the database.
    // Adjust HaloDB's numberOfRecords option accordingly.
    private final static int numberOfRecords = 500_000_000;

    // Set once all read threads finish in READ_AND_UPDATE; signals the update
    // thread to stop. volatile for cross-thread visibility.
    private static volatile boolean isReadComplete = false;

    private static final int numberOfReads = 640_000_000;
    private static final int numberOfReadThreads = 32;
    private static final int noOfReadsPerThread = numberOfReads / numberOfReadThreads; // 20 million per thread.

    // Write rate limit in bytes per second (20 MB/s).
    private static final int writeRateBytesPerSecond = 20 * 1024 * 1024;
    private static final RateLimiter writeRateLimiter = RateLimiter.create(writeRateBytesPerSecond);

    private static final int recordSize = 1024;

    private static final int seed = 100;
    private static final Random random = new Random(seed);
    private static final RandomDataGenerator randomDataGenerator = new RandomDataGenerator(seed);

    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.out.println("Usage: BenchmarkTool <db directory> <benchmark type>");
            System.exit(1);
        }
        String directoryName = args[0];
        String benchmarkType = args[1];

        Benchmarks benchmark = null;
        try {
            benchmark = Benchmarks.valueOf(benchmarkType);
        }
        catch (IllegalArgumentException e) {
            System.out.println("Benchmarks should be one of " + Arrays.toString(Benchmarks.values()));
            System.exit(1);
        }

        System.out.println("Running benchmark " + benchmark);
        File dir = new File(directoryName);

        // select different storage engines here.
        final StorageEngine db = new HaloDBStorageEngine(dir, numberOfRecords);
        //final StorageEngine db = new RocksDBStorageEngine(dir, numberOfRecords);
        //final StorageEngine db = new KyotoStorageEngine(dir, numberOfRecords);
        db.open();
        System.out.println("Opened the database.");

        switch (benchmark) {
            case FILL_SEQUENCE: createDB(db, true); break;
            case FILL_RANDOM: createDB(db, false); break;
            case READ_RANDOM: readRandom(db, numberOfReadThreads); break;
            case RANDOM_UPDATE: update(db); break;
            case READ_AND_UPDATE: updateWithReads(db); break;
        }
        db.close();
    }

    /**
     * Fills the database with numberOfRecords values of recordSize bytes each.
     * Keys are sequential longs when isSequential is true, random otherwise.
     */
    private static void createDB(StorageEngine db, boolean isSequential) {
        long start = System.currentTimeMillis();
        byte[] value;
        long dataSize = 0;
        for (int i = 0; i < numberOfRecords; i++) {
            value = randomDataGenerator.getData(recordSize);
            dataSize += (long) value.length;
            byte[] key = isSequential ? longToBytes(i) : longToBytes(random.nextInt(numberOfRecords));
            db.put(key, value);
            if (i % 1_000_000 == 0) {
                System.out.printf("%s: Wrote %d records\n", DateFormat.getTimeInstance().format(new Date()), i);
            }
        }
        long end = System.currentTimeMillis();
        // clamp to 1 second to avoid division by zero on very short runs.
        long time = Math.max(1, (end - start) / 1000);
        System.out.println("Completed writing data in " + time);
        System.out.printf("Write rate %d MB/sec\n", dataSize / time / 1024L / 1024L);
        System.out.println("Size of database " + db.size());
    }

    /**
     * Overwrites randomly chosen keys, throttled by writeRateLimiter, until
     * numberOfRecords writes have been issued.
     */
    private static void update(StorageEngine db) {
        long start = System.currentTimeMillis();
        byte[] value;
        long dataSize = 0;
        for (int i = 0; i < numberOfRecords; i++) {
            value = randomDataGenerator.getData(recordSize);
            writeRateLimiter.acquire(value.length);
            dataSize += (long) value.length;
            byte[] key = longToBytes(random.nextInt(numberOfRecords));
            db.put(key, value);
            if (i % 1_000_000 == 0) {
                System.out.printf("%s: Wrote %d records\n", DateFormat.getTimeInstance().format(new Date()), i);
            }
        }
        long end = System.currentTimeMillis();
        long time = Math.max(1, (end - start) / 1000);
        System.out.println("Completed over writing data in " + time);
        System.out.printf("Write rate %d MB/sec\n", dataSize / time / 1024L / 1024L);
        System.out.println("Size of database " + db.size());
    }

    /**
     * Runs the requested number of reader threads concurrently, each doing
     * noOfReadsPerThread random gets, then prints aggregate latency stats.
     */
    private static void readRandom(StorageEngine db, int threads) {
        Read[] reads = new Read[threads];
        long start = System.currentTimeMillis();
        for (int i = 0; i < reads.length; i++) {
            reads[i] = new Read(db, i);
            reads[i].start();
        }
        for (Read r : reads) {
            try {
                r.join();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        long time = Math.max(1, (System.currentTimeMillis() - start) / 1000);
        System.out.printf("Completed %d reads with %d threads in %d seconds\n", numberOfReads, threads, time);
        System.out.println("Operations per second - " + numberOfReads / time);

        Histogram latencyHistogram = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);
        for (Read r : reads) {
            latencyHistogram.add(r.latencyHistogram);
        }
        printHistogram(latencyHistogram);
    }

    /**
     * Runs the reader threads while a single rate-limited update thread
     * overwrites random keys. The update thread stops once all reads finish.
     */
    private static void updateWithReads(StorageEngine db) {
        Read[] reads = new Read[numberOfReadThreads];
        Thread update = new Thread(new Runnable() {
            @Override
            public void run() {
                long start = System.currentTimeMillis();
                byte[] value;
                long dataSize = 0, count = 0;
                while (!isReadComplete) {
                    value = randomDataGenerator.getData(recordSize);
                    writeRateLimiter.acquire(value.length);
                    dataSize += (long) value.length;
                    byte[] key = longToBytes(random.nextInt(numberOfRecords));
                    db.put(key, value);
                    if (count++ % 1_000_000 == 0) {
                        System.out.printf("%s: Wrote %d records\n", DateFormat.getTimeInstance().format(new Date()), count);
                    }
                }
                long end = System.currentTimeMillis();
                long time = Math.max(1, (end - start) / 1000);
                System.out.println("Completed over writing data in " + time);
                System.out.println("Write operations per second - " + count / time);
                System.out.printf("Write rate %d MB/sec\n", dataSize / time / 1024L / 1024L);
                System.out.println("Size of database " + db.size());
            }
        });

        long start = System.currentTimeMillis();
        for (int i = 0; i < reads.length; i++) {
            reads[i] = new Read(db, i);
            reads[i].start();
        }
        update.start();

        for (Read r : reads) {
            try {
                r.join();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        long time = Math.max(1, (System.currentTimeMillis() - start) / 1000);

        // All reads are done: signal the update thread to stop and wait for it,
        // so the caller doesn't close the db while a write is still in flight.
        isReadComplete = true;
        try {
            update.join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        long maxTime = -1;
        for (Read r : reads) {
            maxTime = Math.max(maxTime, r.time);
        }
        maxTime = maxTime / 1000;
        System.out.println("Maximum time taken by a read thread to complete - " + maxTime);
        System.out.printf("Completed %d reads with %d threads in %d seconds\n", numberOfReads, numberOfReadThreads, time);
        System.out.println("Read operations per second - " + numberOfReads / time);

        Histogram latencyHistogram = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);
        for (Read r : reads) {
            latencyHistogram.add(r.latencyHistogram);
        }
        printHistogram(latencyHistogram);
    }

    // Prints latency percentiles from a histogram (values in nanoseconds).
    private static void printHistogram(Histogram latencyHistogram) {
        System.out.printf("Max value - %d\n", latencyHistogram.getMaxValue());
        System.out.printf("Average value - %f\n", latencyHistogram.getMean());
        System.out.printf("95th percentile - %d\n", latencyHistogram.getValueAtPercentile(95.0));
        System.out.printf("99th percentile - %d\n", latencyHistogram.getValueAtPercentile(99.0));
        System.out.printf("99.9th percentile - %d\n", latencyHistogram.getValueAtPercentile(99.9));
        System.out.printf("99.99th percentile - %d\n", latencyHistogram.getValueAtPercentile(99.99));
    }

    /**
     * A reader thread: performs noOfReadsPerThread random gets against the db,
     * recording per-operation latency in its own histogram.
     */
    static class Read extends Thread {
        final int id;
        final Random rand;
        final StorageEngine db;
        long time; // wall-clock millis taken by run().
        Histogram latencyHistogram = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);

        Read(StorageEngine db, int id) {
            this.db = db;
            this.id = id;
            // distinct seed per thread so threads read different key sequences.
            rand = new Random(seed + id);
        }

        @Override
        public void run() {
            long sum = 0, count = 0;
            long start = System.currentTimeMillis();
            while (count < noOfReadsPerThread) {
                long key = (long) rand.nextInt(numberOfRecords);
                long s = System.nanoTime();
                byte[] value = db.get(longToBytes(key));
                latencyHistogram.recordValue(System.nanoTime() - s);
                count++;
                if (value == null) {
                    System.out.println("NO value for key " + key);
                    continue;
                }
                if (count % 1_000_000 == 0) {
                    System.out.printf(printDate() + "Read: %d Completed %d reads\n", this.id, count);
                }
                // consume the value so the read cannot be optimized away.
                sum += value.length;
            }
            time = (System.currentTimeMillis() - start);
            System.out.printf("Read: %d Completed in time %d\n", id, time);
        }
    }

    /** Encodes a long as an 8-byte big-endian array. */
    public static byte[] longToBytes(long value) {
        return Longs.toByteArray(value);
    }

    /** Formats the current time as "HH:mm:ss: " for log prefixes. */
    public static String printDate() {
        // A new SimpleDateFormat per call: the class is not thread-safe and
        // this method is invoked concurrently from the Read threads.
        return new SimpleDateFormat("HH:mm:ss").format(new Date()) + ": ";
    }
}
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/Benchmarks.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
/**
 * The benchmark workloads supported by {@link BenchmarkTool}.
 */
public enum Benchmarks {
    // Load the db by writing records with sequentially increasing keys.
    FILL_SEQUENCE,
    // Load the db by writing records with randomly chosen keys.
    FILL_RANDOM,
    // Random reads from multiple threads, recording latency histograms.
    READ_RANDOM,
    // Rate-limited overwrites of randomly chosen existing keys.
    RANDOM_UPDATE,
    // Concurrent random reads plus one rate-limited update thread.
    READ_AND_UPDATE;
}
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/HaloDBStorageEngine.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
import com.google.common.primitives.Ints;
import com.oath.halodb.HaloDB;
import com.oath.halodb.HaloDBException;
import com.oath.halodb.HaloDBOptions;
import java.io.File;
/**
 * {@link StorageEngine} adapter over HaloDB. All HaloDBExceptions are caught
 * and printed; get() falls back to an empty array on error.
 */
public class HaloDBStorageEngine implements StorageEngine {

    private final File dbDirectory;
    private final long noOfRecords;
    private HaloDB db;

    public HaloDBStorageEngine(File dbDirectory, long noOfRecords) {
        this.dbDirectory = dbDirectory;
        this.noOfRecords = noOfRecords;
    }

    @Override
    public void open() {
        HaloDBOptions options = new HaloDBOptions();
        options.setMaxFileSize(1024 * 1024 * 1024);
        options.setCompactionThresholdPerFile(0.50);
        options.setFlushDataSizeBytes(10 * 1024 * 1024);
        // size the in-memory index at twice the expected record count.
        options.setNumberOfRecords(Ints.checkedCast(2 * noOfRecords));
        options.setCompactionJobRate(135 * 1024 * 1024);
        options.setUseMemoryPool(true);
        options.setFixedKeySize(8);
        try {
            db = HaloDB.open(dbDirectory, options);
        } catch (HaloDBException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void put(byte[] key, byte[] value) {
        try {
            db.put(key, value);
        } catch (HaloDBException e) {
            e.printStackTrace();
        }
    }

    @Override
    public byte[] get(byte[] key) {
        try {
            return db.get(key);
        } catch (HaloDBException e) {
            e.printStackTrace();
        }
        // on error, report an empty value rather than propagating.
        return new byte[0];
    }

    @Override
    public void delete(byte[] key) {
        try {
            db.delete(key);
        } catch (HaloDBException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void close() {
        if (db != null) {
            try {
                db.close();
            } catch (HaloDBException e) {
                e.printStackTrace();
            }
        }
    }

    @Override
    public long size() {
        return db.size();
    }

    @Override
    public void printStats() {
    }

    @Override
    public String stats() {
        return db.stats().toString();
    }
}
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/KyotoStorageEngine.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
import java.io.File;
import kyotocabinet.DB;
/**
 * {@link StorageEngine} adapter over Kyoto Cabinet's hash database (.kch).
 * Tuning parameters are passed through the file-name suffix, as Kyoto
 * Cabinet requires.
 */
public class KyotoStorageEngine implements StorageEngine {

    private final File dbDirectory;
    private final int noOfRecords;
    // 2 => hash database (kch) per the kyotocabinet.DB type constants.
    private final DB db = new DB(2);

    public KyotoStorageEngine(File dbDirectory, int noOfRecords) {
        this.dbDirectory = dbDirectory;
        this.noOfRecords = noOfRecords;
    }

    @Override
    public void open() {
        int mode = DB.OWRITER | DB.OCREATE | DB.ONOREPAIR;
        StringBuilder fileNameBuilder = new StringBuilder();
        fileNameBuilder.append(dbDirectory.getPath()).append("/kyoto.kch");
        // specifies the power of the alignment of record size
        fileNameBuilder.append("#apow=").append(8);
        // specifies the number of buckets of the hash table.
        // Use long arithmetic: with large record counts (e.g. the tool's
        // default of 500M) the int expression noOfRecords * 4 overflows.
        fileNameBuilder.append("#bnum=").append(4L * noOfRecords);
        // specifies the mapped memory size
        fileNameBuilder.append("#msiz=").append(2_500_000_000L);
        // specifies the unit step number of auto defragmentation
        fileNameBuilder.append("#dfunit=").append(8);
        String fileName = fileNameBuilder.toString();
        System.out.printf("Creating %s\n", fileName);
        if (!db.open(fileName, mode)) {
            // db.error() is passed as a format argument; concatenating it into
            // the format string would break if the message contained a '%'.
            throw new IllegalArgumentException(
                String.format("KC db %s open error: %s", fileName, db.error()));
        }
    }

    @Override
    public void put(byte[] key, byte[] value) {
        db.set(key, value);
    }

    @Override
    public byte[] get(byte[] key) {
        return db.get(key);
    }

    @Override
    public void close() {
        db.close();
    }

    @Override
    public long size() {
        return db.size();
    }
}
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/RandomDataGenerator.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
import java.util.Random;
public class RandomDataGenerator {
private final byte[] data;
private static final int size = 1003087;
private int position = 0;
public RandomDataGenerator(int seed) {
this.data = new byte[size];
Random random = new Random(seed);
random.nextBytes(data);
}
public byte[] getData(int length) {
byte[] b = new byte[length];
for (int i = 0; i < length; i++) {
if (position >= size) {
position = 0;
}
b[i] = data[position++];
}
return b;
}
}
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/RocksDBStorageEngine.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
import org.rocksdb.CompressionType;
import org.rocksdb.Env;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;
import org.rocksdb.WriteOptions;
import java.io.File;
import java.util.Arrays;
import java.util.List;
/**
 * {@link StorageEngine} adapter over RocksDB, configured for the benchmark
 * workload (WAL disabled, snappy compression on deeper levels).
 */
public class RocksDBStorageEngine implements StorageEngine {

    private RocksDB db;
    private Options options;
    private WriteOptions writeOptions;
    private final File dbDirectory;

    public RocksDBStorageEngine(File dbDirectory, int noOfRecords) {
        // noOfRecords is accepted for constructor parity with the other
        // engines; RocksDB sizes itself from the options set in open().
        this.dbDirectory = dbDirectory;
    }

    @Override
    public void put(byte[] key, byte[] value) {
        try {
            db.put(writeOptions, key, value);
        } catch (RocksDBException e) {
            e.printStackTrace();
        }
    }

    @Override
    public byte[] get(byte[] key) {
        byte[] value = null;
        try {
            value = db.get(key);
        } catch (RocksDBException e) {
            e.printStackTrace();
        }
        return value;
    }

    @Override
    public void open() {
        options = new Options().setCreateIfMissing(true);
        options.setStatsDumpPeriodSec(1000000);
        options.setWriteBufferSize(128L * 1024 * 1024);
        options.setMaxWriteBufferNumber(3);
        options.setMaxBackgroundCompactions(20);
        Env env = Env.getDefault();
        env.setBackgroundThreads(20, Env.COMPACTION_POOL);
        options.setEnv(env);
        // max size of L1 10 MB.
        options.setMaxBytesForLevelBase(10485760);
        options.setTargetFileSizeBase(67108864);
        options.setLevel0FileNumCompactionTrigger(4);
        options.setLevel0SlowdownWritesTrigger(6);
        options.setLevel0StopWritesTrigger(12);
        options.setNumLevels(6);
        options.setDeleteObsoleteFilesPeriodMicros(300000000);
        options.setAllowMmapReads(false);
        options.setCompressionType(CompressionType.SNAPPY_COMPRESSION);

        System.out.printf("maxBackgroundCompactions %d \n", options.maxBackgroundCompactions());
        System.out.printf("minWriteBufferNumberToMerge %d \n", options.minWriteBufferNumberToMerge());
        System.out.printf("maxWriteBufferNumberToMaintain %d \n", options.maxWriteBufferNumberToMaintain());
        System.out.printf("level0FileNumCompactionTrigger %d \n", options.level0FileNumCompactionTrigger());
        System.out.printf("maxBytesForLevelBase %d \n", options.maxBytesForLevelBase());
        System.out.printf("maxBytesForLevelMultiplier %f \n", options.maxBytesForLevelMultiplier());
        System.out.printf("targetFileSizeBase %d \n", options.targetFileSizeBase());
        System.out.printf("targetFileSizeMultiplier %d \n", options.targetFileSizeMultiplier());

        // no compression on L0/L1 (write-heavy), snappy for deeper levels.
        List<CompressionType> compressionLevels =
            Arrays.asList(
                CompressionType.NO_COMPRESSION,
                CompressionType.NO_COMPRESSION,
                CompressionType.SNAPPY_COMPRESSION,
                CompressionType.SNAPPY_COMPRESSION,
                CompressionType.SNAPPY_COMPRESSION,
                CompressionType.SNAPPY_COMPRESSION
            );
        options.setCompressionPerLevel(compressionLevels);
        System.out.printf("compressionPerLevel %s \n", options.compressionPerLevel());
        System.out.printf("numLevels %s \n", options.numLevels());

        writeOptions = new WriteOptions();
        writeOptions.setDisableWAL(true);
        System.out.printf("WAL is disabled - %s \n", writeOptions.disableWAL());

        try {
            db = RocksDB.open(options, dbDirectory.getPath());
        } catch (RocksDBException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void close() {
        // Close the db handle before the Options/WriteOptions that configured
        // it, so no native option object is released while the db is open.
        db.close();
        writeOptions.close();
        options.close();
    }
}
================================================
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/StorageEngine.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb.benchmarks;
/**
 * Minimal key-value storage abstraction used by the benchmark tool.
 * Implementations wrap a concrete engine (HaloDB, RocksDB, Kyoto Cabinet).
 * Keys and values are raw byte arrays.
 */
public interface StorageEngine {

    /** Stores the value for the key, overwriting any existing value. */
    void put(byte[] key, byte[] value);

    /** Returns a human-readable statistics summary; empty string by default. */
    default String stats() { return ""; }

    /** Returns the value for the key, or null if it is absent. */
    byte[] get(byte[] key);

    /** Deletes the key; optional operation, a no-op by default. */
    default void delete(byte[] key) {}

    /** Opens or creates the underlying database. */
    void open();

    /** Closes the database and releases its resources. */
    void close();

    /** Returns the number of records in the database; 0 by default. */
    default long size() { return 0; }

    /** Prints engine statistics to stdout; a no-op by default. */
    default void printStats() {
    }
}
================================================
FILE: docs/WhyHaloDB.md
================================================
# HaloDB at Yahoo.
At Yahoo, we built this high throughput, low latency distributed key-value database that runs in multiple data centers in different parts of the world.
The database stores billions of records and handles millions of read and write requests per second with an SLA of 1 millisecond at the 99th percentile.
The data we have in this database must be persistent, and the working set is larger than what we can fit in memory.
Therefore, a key component of the database’s performance is a fast storage engine, for which we have relied on Kyoto Cabinet. Although Kyoto Cabinet has served us well,
it was designed primarily for a read-heavy workload and its write throughput started to be a bottleneck as we took on more write traffic.
There were also other issues we faced with Kyoto Cabinet; it takes up to an hour to repair a corrupted db, and takes hours to iterate over and update/delete records (which we have to do every night).
It also doesn't expose enough operational metrics or logs which makes resolving issues challenging. However, our primary concern was Kyoto Cabinet’s write performance,
which based on our projections, would have been a major obstacle for scaling the database; therefore, it was a good time to look for alternatives.
**These are the salient features of the database’s workload for which the storage engine will be used:**
* Small keys (8 bytes) and large values (10KB average)
* Both read and write throughput are high.
* Submillisecond read latency at the 99th percentile.
* Single writer thread.
* No need for ordered access or range scans.
* Working set is much larger than available memory, hence workload is IO bound.
* Database is written in Java.
## Why a new storage engine?
Although there are umpteen storage engines publicly available, almost all use a variation of the following data structures to organize data on disk for fast lookup:
* __Hash table__: Kyoto Cabinet.
* __Log-structured merge tree__: LevelDB, RocksDB.
* __B-Tree/B+ Tree__: Berkeley DB, InnoDB.
Since our workload requires very high write throughput, Hash table and B-Tree based storage engines were not suitable as they need to do random writes.
Although modern SSDs have narrowed the gap between sequential and random write performance, sequential writes still have higher throughput, primarily due
to the reduced internal garbage collection load within the SSD. LSM trees also turned out to be unsuitable; benchmarking RocksDB on our workload showed
a write amplification of 10-12, therefore writing 100MB/sec to RocksDB meant that it will write more than 1 GB/sec to the SSD, clearly too high.
High write amplification of RocksDB is a property of the LSM data structure itself, thereby ruling out storage engines based on LSM trees.
LSM tree and B-Tree also maintain an ordering of keys to support efficient range scans, but the cost they pay is a read amplification greater than 1,
and for LSM tree, very high write amplification. Since our workload only does point lookups, we don’t want to pay the cost associated with storing data
in a format suitable for range scans.
These problems ruled out most of the publicly available and well maintained storage engines. Looking at alternate storage engine data structures led us to
explore ideas used in Log-structured storage systems. Here was a potential good fit; log-structured system only does sequential writes, an efficient
garbage collection implementation can keep write amplification low, and having an index in memory for the keys can give us a read amplification of one,
and we get transactional updates, snapshots, and quick crash recovery almost for free. Also in this scheme, there is no ordering of data and hence its
associated costs are not paid. We found that similar ideas have been used in [BitCask](https://github.com/basho/bitcask/blob/develop/doc/bitcask-intro.pdf)
and [Haystack](https://code.facebook.com/posts/685565858139515/needle-in-a-haystack-efficient-storage-of-billions-of-photos/).
But BitCask was written in Erlang, and since our database runs on the JVM, running an Erlang VM on the same box and talking to it from the JVM is something
that we didn’t want to do. Haystack, on the other hand, is a full-fledged distributed database optimized for storing photos, and its storage engine hasn’t been open sourced.
Therefore it was decided to write a new storage engine from scratch; thus the HaloDB project was initiated.
## Performance test results on our production workload.
The following chart shows the results of performance tests that we ran with production data against a performance test box with the same hardware as production boxes. The read requests were kept at 50,000 QPS while the write QPS was increased.

As you can see at the 99th percentile HaloDB read latency is an order of magnitude better than that of Kyoto Cabinet.
We recently upgraded our SSDs to PCIe NVMe SSDs. This has given us a significant performance boost and has narrowed the gap between HaloDB and Kyoto Cabinet,
but the difference is still significant:

Of course, these are results from performance tests, but nothing beats real data from hosts running in production.
Following chart shows the 99th percentile latency from a production server before and after migration to HaloDB.

HaloDB has thus given our production boxes a 50% improvement in capacity while consistently maintaining a sub-millisecond latency at the 99th percentile.
HaloDB has also fixed a few other problems that we had with KyotoCabinet. The daily cleanup job that used to take up to 5 hours in Kyoto Cabinet now completes in 90 minutes
with HaloDB due to its improved write throughput. Also, HaloDB takes only a few seconds to recover from a crash due to the fact that all log files,
once they are rolled over, are immutable. Hence, in the event of a crash only the last file that was being written to needs to be repaired.
Whereas, with Kyoto Cabinet crash recovery used to take more than an hour to complete. And the metrics that HaloDB exposes gives us good insight into its internal state,
which was missing with Kyoto Cabinet.
================================================
FILE: docs/benchmarks.md
================================================
# Benchmarks
Benchmarks were run to compare HaloDB against RocksDB and KyotoCabinet.
KyotoCabinet was chosen as we were using it in production. RocksDB was chosen as it is a well known storage engine
with good documentation and a large community. HaloDB and KyotoCabinet support only a subset of RocksDB's features, therefore the comparison is not exactly fair to RocksDB.
All benchmarks were run on bare-metal box with the following specifications:
* 2 x Xeon E5-2680 2.50GHz (HT enabled, 24 cores, 48 threads)
* 128 GB of RAM.
* 1 Samsung PM863 960 GB SSD with XFS file system.
* RHEL 6 with kernel 2.6.32.
Key size was 8 bytes and value size 1024 bytes. Tests created a db with 500 million records with total size of approximately
500GB. Since this is significantly bigger than the available memory it will ensure that the workload will be IO bound, which is what HaloDB was primarily designed for.
Benchmark tool can be found [here](../benchmarks)
## Test 1: Fill Sequential.
Create a new db by inserting 500 million records in sorted key order.

DB size at the end of the test run.
| Storage Engine | GB |
| ------------- | --------- |
| HaloDB | 503 |
| KyotoCabinet | 609 |
| RocksDB | 487 |
## Test 2: Random Read
Measure random read performance with 32 threads doing _640 million reads_ in total. Read ahead was disabled for this test.

## Test 3: Random Update.
Perform 500 million updates to randomly selected records.

DB size at the end of the test run.
| Storage Engine | GB |
| ------------- | --------- |
| HaloDB | 556 |
| KyotoCabinet | 609 |
| RocksDB | 504 |
## Test 4: Fill Random.
Insert 500 million records into an empty db in random order.

## Test 5: Read and update.
32 threads doing a total of 640 million random reads and one thread doing random updates as fast as possible.

## Why HaloDB is fast.
HaloDB doesn't claim to be always better than RocksDB or KyotoCabinet. HaloDB was written for a specific type of workload, and therefore had
the advantage of optimizing for that workload; the trade-offs that HaloDB makes might make it sub-optimal for other workloads (best to run benchmarks to verify).
HaloDB also offers only a small subset of features compared to other storage engines like RocksDB.
All writes to HaloDB are sequential writes to append-only log files. HaloDB uses a background compaction job to clean up stale data.
The threshold at which a file is compacted can be tuned and this determines HaloDB's write amplification and space amplification.
A compaction threshold of 50% gives a write amplification of only 2, this coupled with the fact that we do only sequential writes
are the primary reasons for HaloDB’s high write throughput. Additionally, the only meta-data that HaloDB needs to modify during writes are
those of the index in memory. The trade-off here is that HaloDB will occupy more space on disk.
To lookup the value for a key its corresponding metadata is first read from the in-memory index and then the value is read from disk.
Therefore each lookup request requires at most a single read from disk, giving us a read amplification of 1, and is primarily responsible
for HaloDB’s low read latencies. The trade-off here is that we need to store all the keys and their associated metadata in memory. HaloDB
also needs to scan all the keys during startup to build the in-memory index. This, depending on the number of keys, might take a few minutes.
HaloDB avoids doing in-place updates and doesn't need record level locks. A type of MVCC is inherent in the design of all log-structured storage systems. This also helps with performance even under high read and write throughput.
HaloDB also doesn't support range scans and therefore doesn't pay the cost associated with storing data in a format suitable for efficient range scans.
================================================
FILE: pom.xml
================================================
<!--
~ Copyright 2018, Oath Inc
~ Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.oath.halodb</groupId>
<artifactId>halodb</artifactId>
<version>0.5.6</version>
<packaging>jar</packaging>
<name>HaloDB</name>
<description>A fast, embedded, persistent key-value storage engine.</description>
<url>http://maven.apache.org</url>
<developers>
<developer>
<name>Arjun Mannaly</name>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.12</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>18.0</version>
</dependency>
<dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>net.jpountz.lz4</groupId>
<artifactId>lz4</artifactId>
<optional>true</optional>
<version>1.3</version>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<version>1.3</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.3</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<version>2.3</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.9.10</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.jmockit</groupId>
<artifactId>jmockit</artifactId>
<version>1.38</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<version>3.8.0</version>
<scope>test</scope>
</dependency>
</dependencies>
<scm>
<url>https://github.com/yahoo/HaloDB</url>
<developerConnection>scm:git:git@github.com:yahoo/HaloDB.git</developerConnection>
<tag>HEAD</tag>
</scm>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.5.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.20.1</version>
<configuration>
<properties>
<property>
<name>listener</name>
<value>com.oath.halodb.TestListener</value>
</property>
</properties>
<argLine>-Xms2G -Xmx2G</argLine>
</configuration>
</plugin>
<plugin>
<artifactId>maven-release-plugin</artifactId>
<version>2.5.3</version>
<configuration>
<tagNameFormat>v@{project.version}</tagNameFormat>
</configuration>
</plugin>
</plugins>
<resources>
<resource>
<directory>config</directory>
<includes>
<include>*.properties</include>
</includes>
</resource>
</resources>
</build>
<distributionManagement>
<repository>
<id>github</id>
<url>https://maven.pkg.github.com/yahoo/halodb</url>
</repository>
</distributionManagement>
</project>
================================================
FILE: src/main/java/com/oath/halodb/CompactionManager.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.RateLimiter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Owns the background compaction job for a HaloDB instance. File ids whose
 * stale-data threshold has been crossed are queued here, and a single
 * CompactionThread copies the still-fresh records from each queued file into a
 * new compacted file before the stale file is deleted.
 */
class CompactionManager {
    private static final Logger logger = LoggerFactory.getLogger(CompactionManager.class);

    private final HaloDBInternal dbInternal;

    // Loop condition for the compaction thread; set to false to request a stop.
    private volatile boolean isRunning = false;
    // Throttles how fast records are copied during compaction (units come from
    // options.getCompactionJobRate(); acquire() is called with the record size).
    private final RateLimiter compactionRateLimiter;

    // Compacted file currently being appended to; null until the first rollover.
    private volatile HaloDBFile currentWriteFile = null;
    // Write position within currentWriteFile; mutated only by the compaction thread.
    private int currentWriteFileOffset = 0;

    // File ids waiting to be compacted.
    private final BlockingQueue<Integer> compactionQueue;
    private volatile CompactionThread compactionThread;

    // Statistics counters; volatile so reader threads observe fresh values.
    private volatile long numberOfRecordsCopied = 0;
    private volatile long numberOfRecordsReplaced = 0;
    private volatile long numberOfRecordsScanned = 0;
    private volatile long sizeOfRecordsCopied = 0;
    private volatile long sizeOfFilesDeleted = 0;
    private volatile long totalSizeOfRecordsCopied = 0;
    private volatile long compactionStartTime = System.currentTimeMillis();

    // Sentinel queued instead of interrupting the thread, so in-flight IO is never cut short.
    private static final int STOP_SIGNAL = -10101;

    // Serializes start/stop/restart transitions of the compaction thread.
    private final ReentrantLock startStopLock = new ReentrantLock();
    // True while stopCompactionThread() is in progress; tells the crash handler not to restart.
    private volatile boolean stopInProgress = false;

    CompactionManager(HaloDBInternal dbInternal) {
        this.dbInternal = dbInternal;
        this.compactionRateLimiter = RateLimiter.create(dbInternal.options.getCompactionJobRate());
        this.compactionQueue = new LinkedBlockingQueue<>();
    }

    /**
     * Stops the compaction thread. If a file is being compacted we wait for it
     * complete before stopping (the thread is signalled via STOP_SIGNAL rather
     * than interrupted, to avoid leaving files in an inconsistent state).
     *
     * @param closeCurrentWriteFile whether to flush and close the current compacted write file.
     * @return true if the thread stopped cleanly, false if the wait was interrupted.
     */
    boolean stopCompactionThread(boolean closeCurrentWriteFile) throws IOException {
        stopInProgress = true;
        startStopLock.lock();
        try {
            isRunning = false;
            if (isCompactionRunning()) {
                // We don't want to call interrupt on compaction thread as it
                // may interrupt IO operations and leave files in an inconsistent state.
                // instead we use -10101 as a stop signal.
                compactionQueue.put(STOP_SIGNAL);
                compactionThread.join();
                if (closeCurrentWriteFile && currentWriteFile != null) {
                    currentWriteFile.flushToDisk();
                    currentWriteFile.getIndexFile().flushToDisk();
                    currentWriteFile.close();
                }
            }
        }
        catch (InterruptedException e) {
            logger.error("Error while waiting for compaction thread to stop", e);
            return false;
        }
        finally {
            stopInProgress = false;
            startStopLock.unlock();
        }
        return true;
    }

    /** Starts the compaction thread unless one is already alive. */
    void startCompactionThread() {
        startStopLock.lock();
        try {
            if (!isCompactionRunning()) {
                isRunning = true;
                compactionThread = new CompactionThread();
                compactionThread.start();
            }
        } finally {
            startStopLock.unlock();
        }
    }

    /** Stops the thread but keeps the current write file open so compaction can resume. */
    void pauseCompactionThread() throws IOException {
        logger.info("Pausing compaction thread ...");
        stopCompactionThread(false);
    }

    /** Restarts the compaction thread after a pause. */
    void resumeCompaction() {
        logger.info("Resuming compaction thread");
        startCompactionThread();
    }

    // Returns -1 when no compacted file has been opened yet.
    int getCurrentWriteFileId() {
        return currentWriteFile != null ? currentWriteFile.getFileId() : -1;
    }

    /** Queues a file for compaction; returns false if the queue rejected it. */
    boolean submitFileForCompaction(int fileId) {
        return compactionQueue.offer(fileId);
    }

    int noOfFilesPendingCompaction() {
        return compactionQueue.size();
    }

    long getNumberOfRecordsCopied() {
        return numberOfRecordsCopied;
    }

    long getNumberOfRecordsReplaced() {
        return numberOfRecordsReplaced;
    }

    long getNumberOfRecordsScanned() {
        return numberOfRecordsScanned;
    }

    long getSizeOfRecordsCopied() {
        return sizeOfRecordsCopied;
    }

    long getSizeOfFilesDeleted() {
        return sizeOfFilesDeleted;
    }

    /** @return average bytes copied per second since this manager was created. */
    long getCompactionJobRateSinceBeginning() {
        long timeInSeconds = (System.currentTimeMillis() - compactionStartTime)/1000;
        long rate = 0;
        if (timeInSeconds > 0) {
            rate = totalSizeOfRecordsCopied / timeInSeconds;
        }
        return rate;
    }

    // Resets the per-interval counters. totalSizeOfRecordsCopied is not reset here;
    // it feeds getCompactionJobRateSinceBeginning().
    void resetStats() {
        numberOfRecordsCopied = numberOfRecordsReplaced
            = numberOfRecordsScanned = sizeOfRecordsCopied = sizeOfFilesDeleted = 0;
    }

    boolean isCompactionRunning() {
        return compactionThread != null && compactionThread.isAlive();
    }

    /**
     * Worker thread that drains the compaction queue. If it dies with an
     * uncaught exception it flushes the current write file, clears the write
     * state, and restarts itself — unless a stop is in progress.
     */
    private class CompactionThread extends Thread {
        // Bytes written to the compacted file since its last explicit flush.
        private long unFlushedData = 0;

        CompactionThread() {
            super("CompactionThread");
            setUncaughtExceptionHandler((t, e) -> {
                logger.error("Compaction thread crashed", e);
                if (currentWriteFile != null) {
                    try {
                        currentWriteFile.flushToDisk();
                    } catch (IOException ex) {
                        logger.error("Error while flushing " + currentWriteFile.getFileId() + " to disk", ex);
                    }
                    currentWriteFile = null;
                }
                currentWriteFileOffset = 0;
                if (!stopInProgress) {
                    startStopLock.lock();
                    try {
                        // clear the dead thread reference so startCompactionThread() spawns a new one.
                        compactionThread = null;
                        startCompactionThread();
                    } finally {
                        startStopLock.unlock();
                    }
                }
                else {
                    logger.info("Not restarting thread as the lock is held by stop compaction method.");
                }
            });
        }

        @Override
        public void run() {
            logger.info("Starting compaction thread ...");
            int fileToCompact = -1;
            while (isRunning) {
                try {
                    fileToCompact = compactionQueue.take();
                    if (fileToCompact == STOP_SIGNAL) {
                        logger.debug("Received a stop signal.");
                        // skip rest of the steps and check status of isRunning flag.
                        // while pausing/stopping compaction isRunning flag must be set to false.
                        continue;
                    }
                    logger.debug("Compacting {} ...", fileToCompact);
                    copyFreshRecordsToNewFile(fileToCompact);
                    logger.debug("Completed compacting {} to {}", fileToCompact, getCurrentWriteFileId());
                    dbInternal.markFileAsCompacted(fileToCompact);
                    dbInternal.deleteHaloDBFile(fileToCompact);
                }
                catch (Exception e) {
                    logger.error(String.format("Error while compacting file %d to %d", fileToCompact, getCurrentWriteFileId()), e);
                }
            }
            logger.info("Compaction thread stopped.");
        }

        // TODO: group and move adjacent fresh records together for performance.
        /**
         * Copies every record of the given file that is still current (per the
         * in-memory index) into the compacted write file and points the index
         * at the new location. Flushes the write file before returning so the
         * stale file can be safely deleted.
         */
        private void copyFreshRecordsToNewFile(int idOfFileToCompact) throws IOException {
            HaloDBFile fileToCompact = dbInternal.getHaloDBFile(idOfFileToCompact);
            if (fileToCompact == null) {
                logger.debug("File doesn't exist, was probably compacted already.");
                return;
            }
            FileChannel readFrom = fileToCompact.getChannel();
            IndexFile.IndexFileIterator iterator = fileToCompact.getIndexFile().newIterator();
            long recordsCopied = 0, recordsScanned = 0;
            while (iterator.hasNext()) {
                IndexFileEntry indexFileEntry = iterator.next();
                byte[] key = indexFileEntry.getKey();
                long recordOffset = indexFileEntry.getRecordOffset();
                int recordSize = indexFileEntry.getRecordSize();
                recordsScanned++;
                InMemoryIndexMetaData currentRecordMetaData = dbInternal.getInMemoryIndex().get(key);
                if (isRecordFresh(indexFileEntry, currentRecordMetaData, idOfFileToCompact)) {
                    recordsCopied++;
                    compactionRateLimiter.acquire(recordSize);
                    rollOverCurrentWriteFile(recordSize);
                    sizeOfRecordsCopied += recordSize;
                    totalSizeOfRecordsCopied += recordSize;
                    // fresh record, copy to merged file.
                    long transferred = readFrom.transferTo(recordOffset, recordSize, currentWriteFile.getChannel());
                    //TODO: for testing. remove.
                    if (transferred != recordSize) {
                        logger.error("Had to transfer {} but only did {}", recordSize, transferred);
                    }
                    unFlushedData += transferred;
                    // periodic flush when a flush size is configured (-1 disables it).
                    if (dbInternal.options.getFlushDataSizeBytes() != -1 &&
                        unFlushedData > dbInternal.options.getFlushDataSizeBytes()) {
                        currentWriteFile.getChannel().force(false);
                        unFlushedData = 0;
                    }
                    IndexFileEntry newEntry = new IndexFileEntry(
                        key, recordSize, currentWriteFileOffset,
                        indexFileEntry.getSequenceNumber(), indexFileEntry.getVersion(), -1
                    );
                    currentWriteFile.getIndexFile().write(newEntry);
                    int valueOffset = Utils.getValueOffset(currentWriteFileOffset, key);
                    InMemoryIndexMetaData newMetaData = new InMemoryIndexMetaData(
                        currentWriteFile.getFileId(), valueOffset,
                        currentRecordMetaData.getValueSize(), indexFileEntry.getSequenceNumber()
                    );
                    // conditional replace: if the write thread changed the entry
                    // meanwhile, the old metadata no longer matches and this fails.
                    boolean updated = dbInternal.getInMemoryIndex().replace(key, currentRecordMetaData, newMetaData);
                    if (updated) {
                        numberOfRecordsReplaced++;
                    }
                    else {
                        // write thread wrote a new version while this version was being compacted.
                        // therefore, this version is stale.
                        dbInternal.addFileToCompactionQueueIfThresholdCrossed(currentWriteFile.getFileId(), recordSize);
                    }
                    currentWriteFileOffset += recordSize;
                    currentWriteFile.setWriteOffset(currentWriteFileOffset);
                }
            }
            if (recordsCopied > 0) {
                // After compaction we will delete the stale file.
                // To prevent data loss in the event of a crash we need to ensure that copied data has hit the disk.
                currentWriteFile.flushToDisk();
            }
            numberOfRecordsCopied += recordsCopied;
            numberOfRecordsScanned += recordsScanned;
            sizeOfFilesDeleted += fileToCompact.getSize();
            logger.debug("Scanned {} records in file {} and copied {} records to {}.datac", recordsScanned, idOfFileToCompact, recordsCopied, getCurrentWriteFileId());
        }

        // A record is fresh when the in-memory index still points at this exact
        // file and value offset; otherwise a newer version exists elsewhere.
        private boolean isRecordFresh(IndexFileEntry entry, InMemoryIndexMetaData metaData, int idOfFileToMerge) {
            return metaData != null
                && metaData.getFileId() == idOfFileToMerge
                && metaData.getValueOffset() == Utils.getValueOffset(entry.getRecordOffset(), entry.getKey());
        }

        // Rolls to a new compacted file when the next record wouldn't fit in the current one.
        private void rollOverCurrentWriteFile(int recordSize) throws IOException {
            if (currentWriteFile == null || currentWriteFileOffset + recordSize > dbInternal.options
                .getMaxFileSize()) {
                forceRolloverCurrentWriteFile();
            }
        }
    }

    // Flushes the current compacted file (if any) and opens a fresh one.
    void forceRolloverCurrentWriteFile() throws IOException {
        if (currentWriteFile != null) {
            currentWriteFile.flushToDisk();
            currentWriteFile.getIndexFile().flushToDisk();
        }
        currentWriteFile = dbInternal.createHaloDBFile(HaloDBFile.FileType.COMPACTED_FILE);
        dbInternal.getDbDirectory().syncMetaData();
        currentWriteFileOffset = 0;
    }

    // Used only for tests. to be called only after all writes in the test have been performed.
    @VisibleForTesting
    synchronized boolean isCompactionComplete() {
        if (!isCompactionRunning())
            return true;
        if (compactionQueue.isEmpty()) {
            try {
                isRunning = false;
                submitFileForCompaction(STOP_SIGNAL);
                compactionThread.join();
            } catch (InterruptedException e) {
                logger.error("Error in isCompactionComplete", e);
            }
            return true;
        }
        return false;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/Constants.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import java.util.regex.Pattern;
/**
 * File-name patterns used to recognize HaloDB's on-disk file types.
 * Each pattern captures the numeric file id in group 1.
 */
class Constants {
    // matches data and compacted files with extension .data and .datac respectively.
    static final Pattern DATA_FILE_PATTERN = Pattern.compile("([0-9]+)" + HaloDBFile.DATA_FILE_NAME + "c?");
    // matches index files: file id followed by the index-file suffix.
    static final Pattern INDEX_FILE_PATTERN = Pattern.compile("([0-9]+)" + IndexFile.INDEX_FILE_NAME);
    // matches tombstone files: file id followed by the tombstone-file suffix.
    static final Pattern TOMBSTONE_FILE_PATTERN = Pattern.compile("([0-9]+)" + TombstoneFile.TOMBSTONE_FILE_NAME);
    // matches any numbered storage file regardless of its (lowercase) extension.
    static final Pattern STORAGE_FILE_PATTERN = Pattern.compile("([0-9]+)\\.[a-z]+");
}
================================================
FILE: src/main/java/com/oath/halodb/DBDirectory.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import java.io.File;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.List;
/**
* Represents the top level directory for a HaloDB instance.
*/
/**
 * Represents the top level directory for a HaloDB instance. Holds a read-only
 * channel on the directory itself so that directory metadata can be fsync'ed.
 */
class DBDirectory {
    private final File dbDirectory;
    private final FileChannel directoryChannel;

    private DBDirectory(File dbDirectory, FileChannel directoryChannel) {
        this.dbDirectory = dbDirectory;
        this.directoryChannel = directoryChannel;
    }

    /**
     * Opens the given directory, creating it first if it doesn't already exist.
     */
    static DBDirectory open(File directory) throws IOException {
        FileUtils.createDirectoryIfNotExists(directory);
        FileChannel metaChannel;
        try {
            metaChannel = openReadOnlyChannel(directory);
        }
        catch (IOException e) {
            // Windows cannot open a channel on a directory; run without one there.
            if (!isWindows()) {
                throw e;
            }
            metaChannel = null;
        }
        return new DBDirectory(directory, metaChannel);
    }

    void close() throws IOException {
        if (directoryChannel == null) {
            return;
        }
        directoryChannel.close();
    }

    Path getPath() {
        return dbDirectory.toPath();
    }

    /** All *.data / *.datac files in this directory. */
    File[] listDataFiles() {
        return FileUtils.listDataFiles(dbDirectory);
    }

    /** Ids of all index files in this directory. */
    List<Integer> listIndexFiles() {
        return FileUtils.listIndexFiles(dbDirectory);
    }

    /** All tombstone files in this directory. */
    File[] listTombstoneFiles() {
        return FileUtils.listTombstoneFiles(dbDirectory);
    }

    /** Fsyncs the directory itself, if a directory channel could be opened. */
    void syncMetaData() throws IOException {
        if (directoryChannel == null) {
            return;
        }
        directoryChannel.force(true);
    }

    /**
     * In Linux the recommended way to flush directory metadata is to open a
     * file descriptor for the directory and to call fsync on it. In Java opening a read-only file channel
     * and calling force(true) will do the same for us. But this is an undocumented behavior
     * in Java and could change in future versions.
     * https://grokbase.com/t/lucene/dev/1519kz2s50/recent-java-9-commit-e5b66323ae45-breaks-fsync-on-directory
     *
     * This currently works on Linux and OSX but may not work on other platforms. Therefore, if there is
     * an exception we silently swallow it.
     */
    private static FileChannel openReadOnlyChannel(File directory) throws IOException {
        return FileChannel.open(directory.toPath(), StandardOpenOption.READ);
    }

    // "generic" is the fallback when the os.name property is missing.
    private static boolean isWindows() {
        return System.getProperty("os.name", "generic").toLowerCase(java.util.Locale.ENGLISH).contains("win");
    }
}
================================================
FILE: src/main/java/com/oath/halodb/DBMetaData.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.*;
import java.util.zip.CRC32;
import static java.nio.file.StandardOpenOption.*;
import static java.nio.file.StandardCopyOption.*;
/**
* Represents the Metadata for the DB, stored in METADATA_FILE_NAME,
* and contains methods to operate on it.
*/
/**
 * Represents the Metadata for the DB, stored in METADATA_FILE_NAME,
 * and contains methods to operate on it.
 */
class DBMetaData {
    /**
     * checksum - 4 bytes
     * version - 1 byte.
     * open - 1 byte
     * sequence number - 8 bytes.
     * io error - 1 byte.
     * file size - 4 byte.
     */
    private static final int META_DATA_SIZE = 4+1+1+8+1+4;
    private static final int CHECK_SUM_SIZE = 4;
    private static final int CHECK_SUM_OFFSET = 0;

    private long checkSum = 0;
    private int version = 0;
    private boolean open = false;
    private long sequenceNumber = 0;
    private boolean ioError = false;
    private int maxFileSize = 0;

    private final DBDirectory dbDirectory;

    static final String METADATA_FILE_NAME = "META";

    // Guards load/store of the metadata file across all instances.
    private static final Object lock = new Object();

    DBMetaData(DBDirectory dbDirectory) {
        this.dbDirectory = dbDirectory;
    }

    /**
     * Loads the metadata fields from METADATA_FILE_NAME if the file exists;
     * otherwise leaves the defaults in place.
     */
    void loadFromFileIfExists() throws IOException {
        synchronized (lock) {
            Path metaFile = dbDirectory.getPath().resolve(METADATA_FILE_NAME);
            if (Files.exists(metaFile)) {
                try (SeekableByteChannel channel = Files.newByteChannel(metaFile)) {
                    ByteBuffer buff = ByteBuffer.allocate(META_DATA_SIZE);
                    channel.read(buff);
                    buff.flip();
                    checkSum = Utils.toUnsignedIntFromInt(buff.getInt());
                    version = Utils.toUnsignedByte(buff.get());
                    open = buff.get() != 0;
                    sequenceNumber = buff.getLong();
                    ioError = buff.get() != 0;
                    maxFileSize = buff.getInt();
                }
            }
        }
    }

    /**
     * Persists the metadata atomically: writes to a temp file opened with SYNC
     * (so the contents are durable on close), then renames it over the live file.
     */
    void storeToFile() throws IOException {
        synchronized (lock) {
            String tempFileName = METADATA_FILE_NAME + ".temp";
            Path tempFile = dbDirectory.getPath().resolve(tempFileName);
            Files.deleteIfExists(tempFile);
            try (FileChannel channel = FileChannel.open(tempFile, WRITE, CREATE, SYNC)) {
                ByteBuffer buff = serialize();
                long crc32 = computeCheckSum(buff.array());
                buff.putInt(CHECK_SUM_OFFSET, (int)crc32);
                buff.flip();
                channel.write(buff);
            }
            // Rename only after the channel is closed: moving a file with an open
            // handle fails on Windows. The SYNC open option above already
            // guarantees the temp file's contents are on disk.
            Files.move(tempFile, dbDirectory.getPath().resolve(METADATA_FILE_NAME), REPLACE_EXISTING, ATOMIC_MOVE);
            dbDirectory.syncMetaData();
        }
    }

    // Serializes the current field values into a buffer whose first
    // CHECK_SUM_SIZE bytes (the checksum slot) are left zeroed.
    private ByteBuffer serialize() {
        ByteBuffer buff = ByteBuffer.allocate(META_DATA_SIZE);
        buff.position(CHECK_SUM_SIZE);
        buff.put((byte)version);
        buff.put((byte)(open ? 0xFF : 0));
        buff.putLong(sequenceNumber);
        buff.put((byte)(ioError ? 0xFF : 0));
        buff.putInt(maxFileSize);
        return buff;
    }

    // CRC32 over everything after the checksum slot.
    private long computeCheckSum(byte[] header) {
        CRC32 crc32 = new CRC32();
        crc32.update(header, CHECK_SUM_OFFSET + CHECK_SUM_SIZE, META_DATA_SIZE - CHECK_SUM_SIZE);
        return crc32.getValue();
    }

    /**
     * @return true if the checksum loaded from disk matches the checksum of the
     * currently held field values.
     */
    boolean isValid() {
        return computeCheckSum(serialize().array()) == checkSum;
    }

    boolean isOpen() {
        return open;
    }

    void setOpen(boolean open) {
        this.open = open;
    }

    long getSequenceNumber() {
        return sequenceNumber;
    }

    void setSequenceNumber(long sequenceNumber) {
        this.sequenceNumber = sequenceNumber;
    }

    boolean isIOError() {
        return ioError;
    }

    void setIOError(boolean ioError) {
        this.ioError = ioError;
    }

    public int getVersion() {
        return version;
    }

    public void setVersion(int version) {
        this.version = version;
    }

    public int getMaxFileSize() {
        return maxFileSize;
    }

    public void setMaxFileSize(int maxFileSize) {
        this.maxFileSize = maxFileSize;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/FileUtils.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.spi.FileSystemProvider;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
 * Static helpers for creating, deleting and listing HaloDB's on-disk files.
 */
class FileUtils {

    /**
     * Ensures the given directory exists, creating it (and any parents) if needed.
     * @throws IOException if the path exists but is not a directory, or creation fails.
     */
    static void createDirectoryIfNotExists(File directory) throws IOException {
        if (directory.exists()) {
            if (!directory.isDirectory()) {
                throw new IOException(directory.getName() + " is not a directory.");
            }
            return;
        }
        if (!directory.mkdirs()) {
            throw new IOException("Cannot create directory " + directory.getName());
        }
    }

    /** Recursively deletes the directory and everything inside it. */
    static void deleteDirectory(File dir) throws IOException {
        File[] files = dir.listFiles();
        if (files != null) {
            for (File file : files) {
                if (file.isDirectory()) {
                    deleteDirectory(file);
                } else {
                    Files.delete(file.toPath());
                }
            }
        }
        Files.deleteIfExists(dir.toPath());
    }

    /**
     * Returns the ids of all index files in the directory, sorted ascending so
     * the earliest index files are processed first.
     */
    static List<Integer> listIndexFiles(File directory) {
        File[] files = directory.listFiles(file -> Constants.INDEX_FILE_PATTERN.matcher(file.getName()).matches());
        if (files == null)
            return Collections.emptyList();
        // parse each file id once, then sort naturally (ascending). The previous
        // version parsed every id twice: once in the sort comparator, once in map().
        return Arrays.stream(files)
            .map(f -> getFileId(f, Constants.INDEX_FILE_PATTERN))
            .sorted()
            .collect(Collectors.toList());
    }

    /**
     * Returns all *.tombstone files in the given directory sorted by file id.
     */
    static File[] listTombstoneFiles(File directory) {
        File[] files = directory.listFiles(file -> Constants.TOMBSTONE_FILE_PATTERN.matcher(file.getName()).matches());
        if (files == null)
            return new File[0];
        Comparator<File> comparator = Comparator.comparingInt(f -> getFileId(f, Constants.TOMBSTONE_FILE_PATTERN));
        Arrays.sort(files, comparator);
        return files;
    }

    /**
     * Returns all *.data and *.datac files in the given directory.
     */
    static File[] listDataFiles(File directory) {
        return directory.listFiles(file -> Constants.DATA_FILE_PATTERN.matcher(file.getName()).matches());
    }

    // Extracts the numeric file id (capture group 1 of the pattern) from the file name.
    private static int getFileId(File file, Pattern pattern) {
        Matcher matcher = pattern.matcher(file.getName());
        if (matcher.find()) {
            // parseInt avoids the boxing Integer.valueOf performed previously.
            return Integer.parseInt(matcher.group(1));
        }
        throw new IllegalArgumentException("Cannot extract file id for file " + file.getPath());
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDB.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.annotations.VisibleForTesting;
import java.io.File;
import java.io.IOException;
import java.util.Set;
/**
 * Public entry point of HaloDB. Every operation delegates to {@link HaloDBInternal};
 * this class only converts {@link IOException}s into the checked {@link HaloDBException}
 * so callers see a single exception type.
 */
public final class HaloDB {

    private HaloDBInternal dbInternal;

    // retained only for building error messages (e.g. in close()).
    private File directory;

    /**
     * Opens the db stored in the given directory.
     *
     * @throws HaloDBException if the underlying open fails with an IOException.
     */
    public static HaloDB open(File dirname, HaloDBOptions opts) throws HaloDBException {
        HaloDB db = new HaloDB();
        try {
            db.dbInternal = HaloDBInternal.open(dirname, opts);
            db.directory = dirname;
        } catch (IOException e) {
            throw new HaloDBException("Failed to open db " + dirname.getName(), e);
        }
        return db;
    }

    /** Convenience overload taking the directory as a path string. */
    public static HaloDB open(String directory, HaloDBOptions opts) throws HaloDBException {
        return HaloDB.open(new File(directory), opts);
    }

    /**
     * Returns the value for the given key, or null if the key is absent.
     * The internal read retries (starting at attempt 1) if a file disappears
     * mid-read due to compaction.
     */
    public byte[] get(byte[] key) throws HaloDBException {
        try {
            return dbInternal.get(key, 1);
        } catch (IOException e) {
            throw new HaloDBException("Lookup failed.", e);
        }
    }

    /**
     * Stores the key/value pair, replacing any previous value for the key.
     *
     * @return the result of the in-memory index update.
     */
    public boolean put(byte[] key, byte[] value) throws HaloDBException {
        try {
            return dbInternal.put(key, value);
        } catch (IOException e) {
            throw new HaloDBException("Store to db failed.", e);
        }
    }

    /** Deletes the key, writing a tombstone entry if the key currently exists. */
    public void delete(byte[] key) throws HaloDBException {
        try {
            dbInternal.delete(key);
        } catch (IOException e) {
            throw new HaloDBException("Delete operation failed.", e);
        }
    }

    /** Flushes and closes all files and releases the db lock. */
    public void close() throws HaloDBException {
        try {
            dbInternal.close();
        } catch (IOException e) {
            throw new HaloDBException("Error while closing " + directory.getName(), e);
        }
    }

    /** Returns the number of live keys in the db. */
    public long size() {
        return dbInternal.size();
    }

    /** Returns a snapshot of db statistics. */
    public HaloDBStats stats() {
        return dbInternal.stats();
    }

    /** Resets the statistics counters. */
    public void resetStats() {
        dbInternal.resetStats();
    }

    /** Returns an iterator over all records in the db. */
    public HaloDBIterator newIterator() throws HaloDBException {
        return new HaloDBIterator(dbInternal);
    }

    /** Returns an iterator over all keys in the db. */
    public HaloDBKeyIterator newKeyIterator() {
        return new HaloDBKeyIterator(dbInternal);
    }

    /** Pauses the background compaction thread until resumeCompaction() is called. */
    public void pauseCompaction() throws HaloDBException {
        try {
            dbInternal.pauseCompaction();
        } catch (IOException e) {
            throw new HaloDBException("Error while trying to pause compaction thread", e);
        }
    }

    /**
     * Creates a snapshot of the db (hard links to the current storage files).
     *
     * @return true on success, false if snapshot creation failed.
     */
    public boolean snapshot() {
        return dbInternal.takeSnapshot();
    }

    /** Deletes the snapshot directory if present; returns false on failure. */
    public boolean clearSnapshot() {
        return dbInternal.clearSnapshot();
    }

    /** Returns the directory the snapshot is (or would be) stored in. */
    public File getSnapshotDirectory() {
        return dbInternal.getSnapshotDirectory();
    }

    /** Resumes a previously paused compaction thread. */
    public void resumeCompaction() {
        dbInternal.resumeCompaction();
    }

    // methods used in tests.

    @VisibleForTesting
    boolean isCompactionComplete() {
        return dbInternal.isCompactionComplete();
    }

    @VisibleForTesting
    boolean isTombstoneFilesMerging() {
        return dbInternal.isTombstoneFilesMerging();
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBException.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
/**
 * Checked exception thrown by the public HaloDB API. Typically wraps a
 * lower-level {@link java.io.IOException} from the storage layer.
 */
public class HaloDBException extends Exception {
    private static final long serialVersionUID = 1010101L;

    /** Creates an exception with a message and no cause. */
    public HaloDBException(String message) {
        super(message);
    }

    /** Creates an exception with a message and an underlying cause. */
    public HaloDBException(String message, Throwable cause) {
        super(message, cause);
    }

    /** Creates an exception wrapping only an underlying cause. */
    public HaloDBException(Throwable cause) {
        super(cause);
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBFile.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.primitives.Ints;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Iterator;
import java.util.function.BiFunction;
import java.util.regex.Matcher;
import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
/**
* Represents a data file and its associated index file.
*/
/**
 * Represents a data file and its associated index file.
 *
 * A data file is an append-only sequence of records. For every record appended,
 * an {@link IndexFileEntry} is written to the companion index file so the
 * in-memory index can be rebuilt on startup without reading record values.
 */
class HaloDBFile {
    private static final Logger logger = LoggerFactory.getLogger(HaloDBFile.class);

    // next append position; doubles as the size of valid data in the file.
    private volatile int writeOffset;

    private FileChannel channel;
    private final File backingFile;
    private final DBDirectory dbDirectory;
    private final int fileId;

    private IndexFile indexFile;

    private final HaloDBOptions options;

    // bytes appended since the last flushToDisk(); compared against flushDataSizeBytes.
    private long unFlushedData = 0;

    static final String DATA_FILE_NAME = ".data";
    static final String COMPACTED_DATA_FILE_NAME = ".datac";

    private final FileType fileType;

    private HaloDBFile(int fileId, File backingFile, DBDirectory dbDirectory, IndexFile indexFile, FileType fileType,
                       FileChannel channel, HaloDBOptions options) throws IOException {
        this.fileId = fileId;
        this.backingFile = backingFile;
        this.dbDirectory = dbDirectory;
        this.indexFile = indexFile;
        this.fileType = fileType;
        this.channel = channel;
        // resume appending at the current end of file.
        this.writeOffset = Ints.checkedCast(channel.size());
        this.options = options;
    }

    /**
     * Reads {@code length} bytes starting at {@code offset} into a new array.
     */
    byte[] readFromFile(int offset, int length) throws IOException {
        byte[] value = new byte[length];
        ByteBuffer valueBuf = ByteBuffer.wrap(value);
        int read = readFromFile(offset, valueBuf);
        assert read == length;
        return value;
    }

    /**
     * Reads from the channel at the given absolute position until the buffer is
     * full or end-of-file is reached.
     *
     * @return the number of bytes actually read (may be less than the buffer
     *         capacity if EOF was hit).
     */
    int readFromFile(long position, ByteBuffer destinationBuffer) throws IOException {
        long currentPosition = position;
        int bytesRead;
        do {
            bytesRead = channel.read(destinationBuffer, currentPosition);
            // FileChannel.read returns -1 at end-of-stream; the original code
            // added -1 to the position, under-counting the bytes read by one.
            if (bytesRead > 0) {
                currentPosition += bytesRead;
            }
        } while (bytesRead != -1 && destinationBuffer.hasRemaining());
        return (int)(currentPosition - position);
    }

    /**
     * Reads and deserializes the record starting at the given offset, attaching
     * its header and in-memory index meta-data.
     *
     * @throws HaloDBException if the header or record is truncated or fails verification.
     */
    private Record readRecord(int offset) throws HaloDBException, IOException {
        long tempOffset = offset;
        // read the header from disk.
        ByteBuffer headerBuf = ByteBuffer.allocate(Record.Header.HEADER_SIZE);
        int readSize = readFromFile(offset, headerBuf);
        if (readSize != Record.Header.HEADER_SIZE) {
            throw new HaloDBException("Corrupted header at " + offset + " in file " + fileId);
        }
        tempOffset += readSize;
        Record.Header header = Record.Header.deserialize(headerBuf);
        if (!Record.Header.verifyHeader(header)) {
            throw new HaloDBException("Corrupted header at " + offset + " in file " + fileId);
        }
        // read key-value from disk.
        ByteBuffer recordBuf = ByteBuffer.allocate(header.getKeySize() + header.getValueSize());
        readSize = readFromFile(tempOffset, recordBuf);
        if (readSize != recordBuf.capacity()) {
            throw new HaloDBException("Corrupted record at " + offset + " in file " + fileId);
        }
        Record record = Record.deserialize(recordBuf, header.getKeySize(), header.getValueSize());
        record.setHeader(header);
        int valueOffset = offset + Record.Header.HEADER_SIZE + header.getKeySize();
        record.setRecordMetaData(new InMemoryIndexMetaData(fileId, valueOffset, header.getValueSize(), header.getSequenceNumber()));
        return record;
    }

    /**
     * Appends the record to this file and its entry to the index file.
     *
     * @return the in-memory index meta-data pointing at the value just written.
     */
    InMemoryIndexMetaData writeRecord(Record record) throws IOException {
        writeToChannel(record.serialize());
        int recordSize = record.getRecordSize();
        int recordOffset = writeOffset;
        writeOffset += recordSize;
        IndexFileEntry indexFileEntry = new IndexFileEntry(
            record.getKey(), recordSize,
            recordOffset, record.getSequenceNumber(),
            Versions.CURRENT_INDEX_FILE_VERSION, -1
        );
        indexFile.write(indexFileEntry);
        int valueOffset = Utils.getValueOffset(recordOffset, record.getKey());
        return new InMemoryIndexMetaData(fileId, valueOffset, record.getValue().length, record.getSequenceNumber());
    }

    /**
     * Deletes the current index file and rebuilds it by scanning every record
     * in the data file.
     */
    void rebuildIndexFile() throws IOException {
        indexFile.delete();
        indexFile = new IndexFile(fileId, dbDirectory, options);
        indexFile.create();
        HaloDBFileIterator iterator = new HaloDBFileIterator();
        int offset = 0;
        while (iterator.hasNext()) {
            Record record = iterator.next();
            IndexFileEntry indexFileEntry = new IndexFileEntry(
                record.getKey(), record.getRecordSize(),
                offset, record.getSequenceNumber(),
                Versions.CURRENT_INDEX_FILE_VERSION, -1
            );
            indexFile.write(indexFileEntry);
            offset += record.getRecordSize();
        }
    }

    /**
     * Copies to a temporary file those records whose computed checksum matches the stored one and then atomically
     * rename the temp file to the current file.
     * Records in the file which occur after a corrupted record are discarded.
     * Index file is also recreated.
     * This method is called if we detect an unclean shutdown.
     */
    HaloDBFile repairFile(DBDirectory dbDirectory) throws IOException {
        HaloDBFile repairFile = createRepairFile();
        logger.info("Repairing file {}.", getName());
        HaloDBFileIterator iterator = new HaloDBFileIterator();
        int count = 0;
        while (iterator.hasNext()) {
            Record record = iterator.next();
            // if the header is corrupted iterator will return null.
            if (record != null && record.verifyChecksum()) {
                repairFile.writeRecord(record);
                count++;
            }
            else {
                logger.info("Found a corrupted record after copying {} records", count);
                break;
            }
        }
        logger.info("Recovered {} records from file {} with size {}. Size after repair {}.", count, getName(), getSize(), repairFile.getSize());
        repairFile.flushToDisk();
        repairFile.indexFile.flushToDisk();
        Files.move(repairFile.indexFile.getPath(), indexFile.getPath(), REPLACE_EXISTING, ATOMIC_MOVE);
        Files.move(repairFile.getPath(), getPath(), REPLACE_EXISTING, ATOMIC_MOVE);
        dbDirectory.syncMetaData();
        repairFile.close();
        close();
        return openForReading(dbDirectory, getPath().toFile(), fileType, options);
    }

    /**
     * Creates the ".repair" scratch file (and its index file) used by repairFile().
     * A stale repair file from a failed earlier repair is deleted first.
     */
    private HaloDBFile createRepairFile() throws IOException {
        File repairFile = dbDirectory.getPath().resolve(getName()+".repair").toFile();
        while (!repairFile.createNewFile()) {
            logger.info("Repair file {} already exists, probably from a previous repair which failed. Deleting and trying again", repairFile.getName());
            // If the stale file can be neither created nor deleted the original
            // loop would spin forever; fail fast instead.
            if (!repairFile.delete() && repairFile.exists()) {
                throw new IOException("Cannot delete existing repair file " + repairFile.getName());
            }
        }
        FileChannel channel = new RandomAccessFile(repairFile, "rw").getChannel();
        IndexFile indexFile = new IndexFile(fileId, dbDirectory, options);
        indexFile.createRepairFile();
        return new HaloDBFile(fileId, repairFile, dbDirectory, indexFile, fileType, channel, options);
    }

    /**
     * Writes all buffers fully to the channel, then flushes if sync writes are
     * enabled or the un-flushed byte threshold is crossed.
     *
     * @return the number of bytes written.
     */
    private long writeToChannel(ByteBuffer[] buffers) throws IOException {
        long toWrite = 0;
        for (ByteBuffer buffer : buffers) {
            toWrite += buffer.remaining();
        }
        long written = 0;
        while (written < toWrite) {
            written += channel.write(buffers);
        }
        unFlushedData += written;
        if (options.isSyncWrite() || (options.getFlushDataSizeBytes() != -1 && unFlushedData > options.getFlushDataSizeBytes())) {
            flushToDisk();
            unFlushedData = 0;
        }
        return written;
    }

    /** Forces file content and metadata to disk if the channel is still open. */
    void flushToDisk() throws IOException {
        if (channel != null && channel.isOpen())
            channel.force(true);
    }

    long getWriteOffset() {
        return writeOffset;
    }

    void setWriteOffset(int writeOffset) {
        this.writeOffset = writeOffset;
    }

    /** Size of valid data in the file (same as the write offset). */
    long getSize() {
        return writeOffset;
    }

    IndexFile getIndexFile() {
        return indexFile;
    }

    FileChannel getChannel() {
        return channel;
    }

    FileType getFileType() {
        return fileType;
    }

    int getFileId() {
        return fileId;
    }

    /** Opens an existing data file (and its index file) read-only. */
    static HaloDBFile openForReading(DBDirectory dbDirectory, File filename, FileType fileType, HaloDBOptions options) throws IOException {
        int fileId = HaloDBFile.getFileTimeStamp(filename);
        FileChannel channel = new RandomAccessFile(filename, "r").getChannel();
        IndexFile indexFile = new IndexFile(fileId, dbDirectory, options);
        indexFile.open();
        return new HaloDBFile(fileId, filename, dbDirectory, indexFile, fileType, channel, options);
    }

    /**
     * Creates a new data file (and index file) with the given id, probing for
     * the next free id if a file with that id already exists.
     */
    static HaloDBFile create(DBDirectory dbDirectory, int fileId, HaloDBOptions options, FileType fileType) throws IOException {
        BiFunction<DBDirectory, Integer, File> toFile = (fileType == FileType.DATA_FILE) ? HaloDBFile::getDataFile : HaloDBFile::getCompactedDataFile;
        File file = toFile.apply(dbDirectory, fileId);
        while (!file.createNewFile()) {
            // file already exists try another one.
            fileId++;
            file = toFile.apply(dbDirectory, fileId);
        }
        FileChannel channel = new RandomAccessFile(file, "rw").getChannel();
        //TODO: setting the length might improve performance.
        //file.setLength(max_);
        IndexFile indexFile = new IndexFile(fileId, dbDirectory, options);
        indexFile.create();
        return new HaloDBFile(fileId, file, dbDirectory, indexFile, fileType, channel, options);
    }

    HaloDBFileIterator newIterator() throws IOException {
        return new HaloDBFileIterator();
    }

    /** Closes the data channel and the index file. */
    void close() throws IOException {
        if (channel != null) {
            channel.close();
        }
        if (indexFile != null) {
            indexFile.close();
        }
    }

    /** Closes and deletes both the data file and its index file. */
    void delete() throws IOException {
        close();
        if (backingFile != null)
            backingFile.delete();
        if (indexFile != null)
            indexFile.delete();
    }

    String getName() {
        return backingFile.getName();
    }

    Path getPath() {
        return backingFile.toPath();
    }

    private static File getDataFile(DBDirectory dbDirectory, int fileId) {
        return dbDirectory.getPath().resolve(fileId + DATA_FILE_NAME).toFile();
    }

    private static File getCompactedDataFile(DBDirectory dbDirectory, int fileId) {
        return dbDirectory.getPath().resolve(fileId + COMPACTED_DATA_FILE_NAME).toFile();
    }

    /** Classifies a file as compacted (*.datac) or regular (*.data) by its name suffix. */
    static FileType findFileType(File file) {
        String name = file.getName();
        return name.endsWith(COMPACTED_DATA_FILE_NAME) ? FileType.COMPACTED_FILE : FileType.DATA_FILE;
    }

    /**
     * Extracts the numeric file id from the file name.
     *
     * @throws IllegalArgumentException if the name does not match the data file pattern
     *         (the original ignored the match result and would throw an opaque
     *         IllegalStateException from matcher.group()).
     */
    static int getFileTimeStamp(File file) {
        Matcher matcher = Constants.DATA_FILE_PATTERN.matcher(file.getName());
        if (!matcher.find()) {
            throw new IllegalArgumentException("Cannot extract file id for file " + file.getPath());
        }
        return Integer.parseInt(matcher.group(1));
    }

    /**
     * This iterator is intended only to be used internally as it behaves bit differently
     * from expected Iterator behavior: If a record is corrupted next() will return null although hasNext()
     * returns true.
     */
    class HaloDBFileIterator implements Iterator<Record> {
        private final int endOffset;
        private int currentOffset = 0;

        HaloDBFileIterator() throws IOException {
            this.endOffset = Ints.checkedCast(channel.size());
        }

        @Override
        public boolean hasNext() {
            return currentOffset < endOffset;
        }

        @Override
        public Record next() {
            Record record;
            try {
                record = readRecord(currentOffset);
            } catch (IOException | HaloDBException e) {
                // we have encountered an error, probably because record is corrupted.
                // we skip rest of the file and return null.
                logger.error("Error in iterator", e);
                currentOffset = endOffset;
                return null;
            }
            currentOffset += record.getRecordSize();
            return record;
        }
    }

    enum FileType {
        DATA_FILE, COMPACTED_FILE;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBInternal.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.RateLimiter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.regex.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
class HaloDBInternal {
private static final Logger logger = LoggerFactory.getLogger(HaloDBInternal.class);

// sub-directory (under the db directory) where snapshot hard links are created.
static final String SNAPSHOT_SUBDIR = "snapshot";

private DBDirectory dbDirectory;

// data file the writer thread is currently appending records to.
private volatile HaloDBFile currentWriteFile;

// file delete markers (tombstones) are currently appended to.
private volatile TombstoneFile currentTombstoneFile;

// background thread started from open() when cleanUpTombstonesDuringOpen is set.
private volatile Thread tombstoneMergeThread;

// all open data files keyed by file id; lookups during get() go through this map.
private Map<Integer, HaloDBFile> readFileMap = new ConcurrentHashMap<>();

HaloDBOptions options;

private InMemoryIndex inMemoryIndex;

// stale (overwritten/deleted) byte count per file id; drives compaction decisions.
private final Map<Integer, Integer> staleDataPerFileMap = new ConcurrentHashMap<>();

private CompactionManager compactionManager;

// source of fresh file ids; initialized in open() past the highest existing id.
private AtomicInteger nextFileId;

private volatile boolean isClosing = false;

private volatile long statsResetTime = System.currentTimeMillis();

// lock acquired on the db directory while open — see getLock(); released in close().
private FileLock dbLock;

// serializes writers (put/delete) as well as close() and snapshot roll-overs.
private final Lock writeLock = new ReentrantLock();

// cap on retries when a read races with compaction deleting a file.
private static final int maxReadAttempts = 5;

private AtomicLong noOfTombstonesCopiedDuringOpen;
private AtomicLong noOfTombstonesFoundDuringOpen;

private volatile long nextSequenceNumber;

private volatile boolean isTombstoneFilesMerging = false;

// instances are created only through the static open() factory.
private HaloDBInternal() {}
/**
 * Opens the db in the given directory: acquires the directory lock, loads
 * meta-data (repairing files after an unclean shutdown), builds the in-memory
 * index from index files, and starts the compaction and (optionally) tombstone
 * merge threads. The directory lock is released if any step fails.
 */
static HaloDBInternal open(File directory, HaloDBOptions options) throws HaloDBException, IOException {
    checkIfOptionsAreCorrect(options);
    HaloDBInternal dbInternal = new HaloDBInternal();
    try {
        dbInternal.dbDirectory = DBDirectory.open(directory);
        dbInternal.dbLock = dbInternal.getLock();
        dbInternal.options = options;
        int maxFileId = dbInternal.buildReadFileMap();
        // leave a gap above the highest existing id for ids handed out next.
        dbInternal.nextFileId = new AtomicInteger(maxFileId + 10);
        dbInternal.noOfTombstonesCopiedDuringOpen = new AtomicLong(0);
        dbInternal.noOfTombstonesFoundDuringOpen = new AtomicLong(0);
        DBMetaData dbMetaData = new DBMetaData(dbInternal.dbDirectory);
        dbMetaData.loadFromFileIfExists();
        // maxFileSize is baked into file layout and cannot change across opens.
        if (dbMetaData.getMaxFileSize() != 0 && dbMetaData.getMaxFileSize() != options.getMaxFileSize()) {
            throw new IllegalArgumentException("File size cannot be changed after db was created. Current size " + dbMetaData.getMaxFileSize());
        }
        if (dbMetaData.isOpen() || dbMetaData.isIOError()) {
            logger.info("DB was not shutdown correctly last time. Files may not be consistent, repairing them.");
            // open flag is true, this might mean that the db was not cleanly closed the last time.
            dbInternal.repairFiles();
        }
        // mark the db open; a clean close() will clear this flag again.
        dbMetaData.setOpen(true);
        dbMetaData.setIOError(false);
        dbMetaData.setVersion(Versions.CURRENT_META_FILE_VERSION);
        dbMetaData.setMaxFileSize(options.getMaxFileSize());
        dbMetaData.storeToFile();
        dbInternal.compactionManager = new CompactionManager(dbInternal);
        dbInternal.inMemoryIndex = new InMemoryIndex(
            options.getNumberOfRecords(), options.isUseMemoryPool(),
            options.getFixedKeySize(), options.getMemoryPoolChunkSize()
        );
        long maxSequenceNumber = dbInternal.buildInMemoryIndex();
        if (maxSequenceNumber == -1L) {
            dbInternal.nextSequenceNumber = 1;
            logger.info("Didn't find any existing records; initializing max sequence number to 1");
        } else {
            // skip ahead to be safe against records written just before a crash.
            dbInternal.nextSequenceNumber = maxSequenceNumber + 100;
            logger.info("Found max sequence number {}, now starting from {}", maxSequenceNumber, dbInternal.nextSequenceNumber);
        }
        if (!options.isCompactionDisabled()) {
            dbInternal.compactionManager.startCompactionThread();
        }
        else {
            logger.warn("Compaction is disabled in HaloDBOption. This should happen only in tests");
        }
        // merge tombstone files at background if clean up set to true
        if (options.isCleanUpTombstonesDuringOpen()) {
            dbInternal.isTombstoneFilesMerging = true;
            dbInternal.tombstoneMergeThread = new Thread(() -> { dbInternal.mergeTombstoneFiles(); });
            dbInternal.tombstoneMergeThread.start();
        }
        logger.info("Opened HaloDB {}", directory.getName());
        logger.info("maxFileSize - {}", options.getMaxFileSize());
        logger.info("compactionThresholdPerFile - {}", options.getCompactionThresholdPerFile());
    } catch (Exception e) {
        // release the lock if open() failed.
        if (dbInternal.dbLock != null) {
            dbInternal.dbLock.close();
        }
        throw e;
    }
    return dbInternal;
}
/**
 * Closes this instance: stops the compaction thread, waits for the tombstone
 * merge thread, flushes and closes all open files, records a clean shutdown in
 * the meta-data file, and releases the directory lock. Safe to call repeatedly;
 * subsequent calls are no-ops.
 */
synchronized void close() throws IOException {
    writeLock.lock();
    try {
        if (isClosing) {
            // instance already closed.
            return;
        }
        isClosing = true;
        try {
            if (!compactionManager.stopCompactionThread(true))
                setIOErrorFlag();
        } catch (IOException e) {
            logger.error("Error while stopping compaction thread. Setting IOError flag", e);
            setIOErrorFlag();
        }
        if (isTombstoneFilesMerging) {
            try {
                tombstoneMergeThread.join();
            } catch (InterruptedException e) {
                logger.error("Interrupted when waiting the tombstone files merging");
                // restore the interrupt status so callers up the stack can observe it;
                // the original swallowed the interrupt.
                Thread.currentThread().interrupt();
                setIOErrorFlag();
            }
        }
        if (options.isCleanUpInMemoryIndexOnClose())
            inMemoryIndex.close();
        if (currentWriteFile != null) {
            currentWriteFile.flushToDisk();
            currentWriteFile.getIndexFile().flushToDisk();
            currentWriteFile.close();
        }
        if (currentTombstoneFile != null) {
            currentTombstoneFile.flushToDisk();
            currentTombstoneFile.close();
        }
        for (HaloDBFile file : readFileMap.values()) {
            file.close();
        }
        // clear the open flag so the next open() skips repair.
        DBMetaData metaData = new DBMetaData(dbDirectory);
        metaData.loadFromFileIfExists();
        metaData.setOpen(false);
        metaData.storeToFile();
        dbDirectory.close();
        if (dbLock != null) {
            dbLock.close();
        }
    } finally {
        writeLock.unlock();
    }
}
/**
 * Writes the key/value pair to the current write file and updates the in-memory
 * index; the previous version of the key (if any) is counted as stale data for
 * its file. Keys are limited to {@link Byte#MAX_VALUE} (127) bytes.
 *
 * @return the result of the in-memory index update.
 * @throws HaloDBException if the key is too long.
 */
boolean put(byte[] key, byte[] value) throws IOException, HaloDBException {
    if (key.length > Byte.MAX_VALUE) {
        throw new HaloDBException("key length cannot exceed " + Byte.MAX_VALUE);
    }
    //TODO: more fine-grained locking is possible.
    writeLock.lock();
    try {
        Record record = new Record(key, value);
        record.setSequenceNumber(getNextSequenceNumber());
        record.setVersion(Versions.CURRENT_DATA_FILE_VERSION);
        InMemoryIndexMetaData entry = writeRecordToFile(record);
        markPreviousVersionAsStale(key);
        //TODO: implement getAndSet and use the return value for
        //TODO: markPreviousVersionAsStale method.
        return inMemoryIndex.put(key, entry);
    } finally {
        writeLock.unlock();
    }
}
/**
 * Returns the value for the key, or null if absent. A read can race with
 * compaction deleting the file it points at; in that case the index is
 * re-consulted and the read retried, up to {@code maxReadAttempts} attempts.
 *
 * @param attemptNumber 1-based attempt counter; callers pass 1.
 * @throws HaloDBException if all retry attempts are exhausted.
 */
byte[] get(byte[] key, int attemptNumber) throws IOException, HaloDBException {
    if (attemptNumber > maxReadAttempts) {
        logger.error("Tried {} attempts but read failed", attemptNumber-1);
        throw new HaloDBException("Tried " + (attemptNumber-1) + " attempts but failed.");
    }
    InMemoryIndexMetaData metaData = inMemoryIndex.get(key);
    if (metaData == null) {
        return null;
    }
    HaloDBFile readFile = readFileMap.get(metaData.getFileId());
    if (readFile == null) {
        logger.debug("File {} not present. Compaction job would have deleted it. Retrying ...", metaData.getFileId());
        return get(key, attemptNumber+1);
    }
    try {
        return readFile.readFromFile(metaData.getValueOffset(), metaData.getValueSize());
    }
    catch (ClosedChannelException e) {
        if (!isClosing) {
            logger.debug("File {} was closed. Compaction job would have deleted it. Retrying ...", metaData.getFileId());
            return get(key, attemptNumber+1);
        }
        // trying to read after HaloDB.close() method called.
        throw e;
    }
}
/**
 * Reads the value for the key into the supplied buffer and returns the number
 * of bytes read (0 if the key is absent). Retries when a read races with
 * compaction deleting or closing a file, like {@code get(byte[], int)}, but
 * bounded by {@code maxReadAttempts} — the original retried unboundedly via
 * recursion and could loop forever under a pathological race.
 *
 * @throws IOException after the channel is closed during shutdown, or if all
 *         retry attempts are exhausted.
 */
int get(byte[] key, ByteBuffer buffer) throws IOException {
    for (int attempt = 1; attempt <= maxReadAttempts; attempt++) {
        // re-read the index each attempt; compaction may have moved the record.
        InMemoryIndexMetaData metaData = inMemoryIndex.get(key);
        if (metaData == null) {
            return 0;
        }
        HaloDBFile readFile = readFileMap.get(metaData.getFileId());
        if (readFile == null) {
            logger.debug("File {} not present. Compaction job would have deleted it. Retrying ...", metaData.getFileId());
            continue;
        }
        buffer.clear();
        buffer.limit(metaData.getValueSize());
        try {
            int read = readFile.readFromFile(metaData.getValueOffset(), buffer);
            buffer.flip();
            return read;
        }
        catch (ClosedChannelException e) {
            if (!isClosing) {
                logger.debug("File {} was closed. Compaction job would have deleted it. Retrying ...", metaData.getFileId());
                continue;
            }
            // trying to read after HaloDB.close() method called.
            throw e;
        }
    }
    throw new IOException("Tried " + maxReadAttempts + " attempts but read failed.");
}
//TODO: use fine-grained lock if possible
/**
 * Creates a snapshot of the db by hard-linking all storage files older than the
 * (freshly rolled over) current write file into the snapshot directory. Waits
 * for any in-progress tombstone merge and pauses compaction for the duration.
 *
 * @return true on success, false on I/O failure or interruption.
 */
synchronized boolean takeSnapshot() {
    logger.info("Start generating the snapshot");
    if (isTombstoneFilesMerging) {
        logger.info("DB is merging the tombstone files now. Wait it finished");
        try {
            tombstoneMergeThread.join();
        } catch (InterruptedException e) {
            logger.error("Interrupted when waiting the tombstone files merging");
            // restore interrupt status; the original swallowed the interrupt.
            Thread.currentThread().interrupt();
            return false;
        }
    }
    try {
        final int currentWriteFileId;
        compactionManager.pauseCompactionThread();
        // Only support one snapshot now
        // TODO: support multiple snapshots if needed
        File snapshotDir = getSnapshotDirectory();
        if (snapshotDir.exists()) {
            logger.warn("The snapshot dir is already existed. Delete the old one.");
            FileUtils.deleteDirectory(snapshotDir);
        }
        FileUtils.createDirectoryIfNotExists(snapshotDir);
        logger.info("Created directory for snapshot {}", snapshotDir.toString());
        writeLock.lock();
        try {
            // roll over so the snapshot boundary is a fully-flushed file.
            forceRollOverCurrentWriteFile();
            currentTombstoneFile = forceRollOverTombstoneFile(currentTombstoneFile);
            currentWriteFileId = currentWriteFile.getFileId();
        } catch (IOException e) {
            logger.warn("IO exception when rollover current write files", e);
            return false;
        } finally {
            writeLock.unlock();
        }
        File[] filesToLink = dbDirectory.getPath().toFile()
            .listFiles(file -> {
                Matcher m = Constants.STORAGE_FILE_PATTERN.matcher(file.getName());
                return m.matches() && (Integer.parseInt(m.group(1)) < currentWriteFileId);
            });
        compactionManager.forceRolloverCurrentWriteFile();
        if (filesToLink == null) {
            // listFiles returns null on I/O error; the original would NPE below.
            logger.warn("Could not list storage files in the db directory for snapshot");
            return false;
        }
        logger.info("Storage files number need to be linked: {}", filesToLink.length);
        for (File file : filesToLink) {
            Path dest = Paths.get(snapshotDir.getAbsolutePath(), file.getName());
            logger.debug("Create file link from file {} to {}", file.getName(),
                dest.toFile().getAbsoluteFile());
            Files.createLink(dest, file.toPath());
        }
    } catch(IOException e) {
        logger.warn("IOException when creating snapshot", e);
        return false;
    } finally {
        compactionManager.resumeCompaction();
    }
    return true;
}
/** Returns the snapshot directory: the "snapshot" sub-directory of the db directory. */
File getSnapshotDirectory() {
    String baseDir = dbDirectory.getPath().toFile().getAbsolutePath();
    return Paths.get(baseDir, SNAPSHOT_SUBDIR).toFile();
}
/**
 * Deletes the snapshot directory if it exists.
 *
 * @return true if the snapshot is gone (deleted or never existed), false on I/O error.
 */
boolean clearSnapshot() {
    File snapshotDir = getSnapshotDirectory();
    if (!snapshotDir.exists()) {
        logger.info("snapshot not existed");
        return true;
    }
    try {
        FileUtils.deleteDirectory(snapshotDir);
        return true;
    } catch (IOException e) {
        logger.error("snapshot deletion error", e);
        return false;
    }
}
/**
 * Deletes the key: removes it from the in-memory index, appends a tombstone
 * entry, and marks the old record as stale data for compaction. A no-op if the
 * key is not present.
 */
void delete(byte[] key) throws IOException {
    writeLock.lock();
    try {
        InMemoryIndexMetaData metaData = inMemoryIndex.get(key);
        if (metaData != null) {
            //TODO: implement a getAndRemove method in InMemoryIndex.
            inMemoryIndex.remove(key);
            TombstoneEntry entry =
                new TombstoneEntry(key, getNextSequenceNumber(), -1, Versions.CURRENT_TOMBSTONE_FILE_VERSION);
            // roll over the tombstone file first if this entry would overflow it.
            currentTombstoneFile = rollOverTombstoneFile(entry, currentTombstoneFile);
            currentTombstoneFile.write(entry);
            markPreviousVersionAsStale(key, metaData);
        }
    } finally {
        writeLock.unlock();
    }
}
/** Returns the number of live keys (size of the in-memory index). */
long size() {
    return inMemoryIndex.size();
}
/**
 * Persists the IO-error flag in the meta-data file so the next open() will
 * repair the files.
 */
void setIOErrorFlag() throws IOException {
    DBMetaData metaData = new DBMetaData(dbDirectory);
    metaData.loadFromFileIfExists();
    metaData.setIOError(true);
    metaData.storeToFile();
}
/** Pauses the background compaction thread until resumeCompaction() is called. */
void pauseCompaction() throws IOException {
    compactionManager.pauseCompactionThread();
}
/** Resumes a previously paused compaction thread. */
void resumeCompaction() {
    compactionManager.resumeCompaction();
}
/**
 * Appends the record to the current write file (rolling over first if it would
 * not fit) and returns the index meta-data pointing at the written value.
 */
private InMemoryIndexMetaData writeRecordToFile(Record record) throws IOException, HaloDBException {
    rollOverCurrentWriteFile(record);
    return currentWriteFile.writeRecord(record);
}
/**
 * Rolls over to a fresh write file if none exists yet or if appending this
 * record would exceed maxFileSize. Skipped while the db is closing.
 */
private void rollOverCurrentWriteFile(Record record) throws IOException {
    int size = record.getKey().length + record.getValue().length + Record.Header.HEADER_SIZE;
    if ((currentWriteFile == null || currentWriteFile.getWriteOffset() + size > options.getMaxFileSize())
        && !isClosing) {
        forceRollOverCurrentWriteFile();
    }
}
/**
 * Flushes the current write file (and its index file) and replaces it with a
 * newly created data file; directory metadata is synced so the new file's
 * existence survives a crash.
 */
private void forceRollOverCurrentWriteFile() throws IOException {
    if (currentWriteFile != null) {
        currentWriteFile.flushToDisk();
        currentWriteFile.getIndexFile().flushToDisk();
    }
    currentWriteFile = createHaloDBFile(HaloDBFile.FileType.DATA_FILE);
    dbDirectory.syncMetaData();
}
/**
 * Returns the tombstone file to write the entry to, rolling over to a new file
 * if none exists or if the entry would exceed maxTombstoneFileSize. Skipped
 * while the db is closing.
 */
private TombstoneFile rollOverTombstoneFile(TombstoneEntry entry, TombstoneFile tombstoneFile) throws IOException {
    int size = entry.getKey().length + TombstoneEntry.TOMBSTONE_ENTRY_HEADER_SIZE;
    if ((tombstoneFile == null ||
        tombstoneFile.getWriteOffset() + size > options.getMaxTombstoneFileSize()) && !isClosing) {
        tombstoneFile = forceRollOverTombstoneFile(tombstoneFile);
    }
    return tombstoneFile;
}
/**
 * Flushes and closes the given tombstone file (if any) and creates a new one
 * with a fresh file id; directory metadata is synced afterwards.
 */
private TombstoneFile forceRollOverTombstoneFile(TombstoneFile tombstoneFile) throws IOException {
    if (tombstoneFile != null) {
        tombstoneFile.flushToDisk();
        tombstoneFile.close();
    }
    tombstoneFile = TombstoneFile.create(dbDirectory, getNextFileId(), options);
    dbDirectory.syncMetaData();
    return tombstoneFile;
}
/**
 * Looks up the key's current index entry and, when present, marks that version
 * as stale data in its file. No-op for keys not in the index.
 */
private void markPreviousVersionAsStale(byte[] key) {
    InMemoryIndexMetaData existingEntry = inMemoryIndex.get(key);
    if (existingEntry == null) {
        return;
    }
    markPreviousVersionAsStale(key, existingEntry);
}
/**
 * Accounts the record described by {@code recordMetaData} as stale bytes in its
 * file and queues that file for compaction if the stale threshold is crossed.
 */
private void markPreviousVersionAsStale(byte[] key, InMemoryIndexMetaData recordMetaData) {
    int staleRecordSize = Utils.getRecordSize(key.length, recordMetaData.getValueSize());
    addFileToCompactionQueueIfThresholdCrossed(recordMetaData.getFileId(), staleRecordSize);
}
/**
 * Adds {@code staleRecordSize} to the file's stale-byte count and submits the
 * file for compaction once stale data reaches compactionThresholdPerFile of
 * its size. Files currently being written (by the writer or the compactor)
 * are never submitted.
 */
void addFileToCompactionQueueIfThresholdCrossed(int fileId, int staleRecordSize) {
    HaloDBFile file = readFileMap.get(fileId);
    if (file == null)
        return;
    int staleSizeInFile = updateStaleDataMap(fileId, staleRecordSize);
    if (staleSizeInFile >= file.getSize() * options.getCompactionThresholdPerFile()) {
        // We don't want to compact the files the writer thread and the compaction thread is currently writing to.
        if (getCurrentWriteFileId() != fileId && compactionManager.getCurrentWriteFileId() != fileId) {
            if(compactionManager.submitFileForCompaction(fileId)) {
                // once queued, the stale counter is no longer needed for this file.
                staleDataPerFileMap.remove(fileId);
            }
        }
    }
}
/**
 * Adds {@code staleDataSize} to the stale-byte counter for the given file id
 * and returns the updated total for that file.
 */
private int updateStaleDataMap(int fileId, int staleDataSize) {
    // Integer::sum is the idiomatic merge function for accumulating counters.
    return staleDataPerFileMap.merge(fileId, staleDataSize, Integer::sum);
}
/** Drops the stale-data counter for a file that has been fully compacted. */
void markFileAsCompacted(int fileId) {
    staleDataPerFileMap.remove(fileId);
}
/** Returns the in-memory key index. */
InMemoryIndex getInMemoryIndex() {
    return inMemoryIndex;
}
/**
 * Creates a new data file of the given type with a fresh file id and registers
 * it in the read map.
 *
 * @throws IOException if a file with the same id is somehow already registered.
 */
HaloDBFile createHaloDBFile(HaloDBFile.FileType fileType) throws IOException {
    HaloDBFile file = HaloDBFile.create(dbDirectory, getNextFileId(), options, fileType);
    if(readFileMap.putIfAbsent(file.getFileId(), file) != null) {
        throw new IOException("Error while trying to create file " + file.getName() + " file with the given id already exists in the map");
    }
    return file;
}
/**
 * Opens every data file found in the db directory read-only, detecting whether
 * each one is a regular or compacted file from its name.
 */
private List<HaloDBFile> openDataFilesForReading() throws IOException {
    List<HaloDBFile> dataFiles = new ArrayList<>();
    for (File dataFile : dbDirectory.listDataFiles()) {
        HaloDBFile.FileType type = HaloDBFile.findFileType(dataFile);
        dataFiles.add(HaloDBFile.openForReading(dbDirectory, dataFile, type, options));
    }
    return dataFiles;
}
/**
 * Opens data files for reading and creates a map with file id as the key.
 * Also returns the latest file id in the directory which is then used
 * to determine the next file id.
 *
 * @return the highest file id found, or a timestamp-derived id if the
 *         directory has no data files yet.
 * @throws HaloDBException if two files share the same file id.
 */
private int buildReadFileMap() throws HaloDBException, IOException {
    int maxFileId = Integer.MIN_VALUE;
    for (HaloDBFile file : openDataFilesForReading()) {
        if (readFileMap.putIfAbsent(file.getFileId(), file) != null) {
            // There should only be a single file with a given file id.
            throw new HaloDBException("Found duplicate file with id " + file.getFileId());
        }
        maxFileId = Math.max(maxFileId, file.getFileId());
    }
    if (maxFileId == Integer.MIN_VALUE) {
        // no files in the directory. use the current time as the first file id.
        maxFileId = Ints.checkedCast(System.currentTimeMillis() / 1000);
    }
    return maxFileId;
}
/** Allocates the next file id from the monotonically increasing counter. */
private int getNextFileId() {
    return nextFileId.incrementAndGet();
}
/**
 * Returns the open data file of the given type with the largest file id,
 * or empty if no file of that type exists.
 */
private Optional<HaloDBFile> getLatestDataFile(HaloDBFile.FileType fileType) {
    HaloDBFile latest = null;
    for (HaloDBFile candidate : readFileMap.values()) {
        if (candidate.getFileType() != fileType) {
            continue;
        }
        if (latest == null || candidate.getFileId() > latest.getFileId()) {
            latest = candidate;
        }
    }
    return Optional.ofNullable(latest);
}
/**
 * Builds the in-memory index using a dedicated fixed-size thread pool of
 * options.getBuildIndexThreads() threads. The pool is always shut down
 * afterwards, even if the scan fails.
 */
private long buildInMemoryIndex() throws IOException {
    final int threadCount = options.getBuildIndexThreads();
    logger.info("Building index in parallel with {} threads", threadCount);
    final ExecutorService indexExecutor = Executors.newFixedThreadPool(threadCount);
    try {
        return buildInMemoryIndex(indexExecutor);
    } finally {
        indexExecutor.shutdown();
    }
}
/**
 * Builds the in-memory index by scanning all index files in parallel on the
 * given executor, then scans all tombstone files to drop deleted keys.
 * Returns the highest record sequence number observed across both scans,
 * or -1 if the db is empty.
 *
 * @throws IOException if a scan task fails or the calling thread is interrupted
 */
private long buildInMemoryIndex(ExecutorService executor) throws IOException {
    List<Integer> indexFiles = dbDirectory.listIndexFiles();
    logger.info("About to scan {} index files to construct index ...", indexFiles.size());
    long start = System.currentTimeMillis();
    List<ProcessIndexFileTask> indexFileTasks = new ArrayList<>();
    for (int fileId : indexFiles) {
        indexFileTasks.add(new ProcessIndexFileTask(new IndexFile(fileId, dbDirectory, options), fileId));
    }
    long maxSequenceNumber = runTasksAndGetMaxSequenceNumber(executor, indexFileTasks, -1L);
    logger.info("Completed scanning all index files in {}s", (System.currentTimeMillis() - start) / 1000);

    // Scan all the tombstone files and remove records from index.
    start = System.currentTimeMillis();
    File[] tombStoneFiles = dbDirectory.listTombstoneFiles();
    logger.info("About to scan {} tombstone files ...", tombStoneFiles.length);
    List<ProcessTombstoneFileTask> tombstoneFileTasks = new ArrayList<>();
    for (File file : tombStoneFiles) {
        tombstoneFileTasks.add(new ProcessTombstoneFileTask(new TombstoneFile(file, options, dbDirectory)));
    }
    maxSequenceNumber = runTasksAndGetMaxSequenceNumber(executor, tombstoneFileTasks, maxSequenceNumber);
    logger.info("Completed scanning all tombstone files in {}s", (System.currentTimeMillis() - start) / 1000);
    return maxSequenceNumber;
}

/**
 * Runs all tasks on the executor and folds each result into the running
 * maximum sequence number. Restores the interrupt status before translating
 * InterruptedException into IOException, and preserves the original cause.
 */
private static long runTasksAndGetMaxSequenceNumber(ExecutorService executor,
                                                    List<? extends Callable<Long>> tasks,
                                                    long currentMax) throws IOException {
    try {
        for (Future<Long> result : executor.invokeAll(tasks)) {
            currentMax = Long.max(result.get(), currentMax);
        }
        return currentMax;
    } catch (InterruptedException ie) {
        // Re-assert the interrupt so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new IOException("Building index is interrupted", ie);
    } catch (ExecutionException ee) {
        throw new IOException("Error happened during building in-memory index", ee);
    }
}
/**
 * Scans a single index file and merges its entries into the in-memory index.
 * When two versions of a key collide, the one with the higher sequence number
 * wins; the loser's record size is fed into the stale-data accounting so its
 * data file can be queued for compaction. Returns the highest sequence number
 * seen in the file, or -1 if the file is empty.
 */
class ProcessIndexFileTask implements Callable<Long> {
    private final IndexFile indexFile;
    private final int fileId;

    public ProcessIndexFileTask(IndexFile indexFile, int fileId) {
        this.indexFile = indexFile;
        this.fileId = fileId;
    }

    @Override
    public Long call() throws IOException {
        long maxSequenceNumber = -1;
        indexFile.open();
        try {
            IndexFile.IndexFileIterator iterator = indexFile.newIterator();
            // build the in-memory index by scanning all index files.
            int count = 0, inserted = 0;
            while (iterator.hasNext()) {
                IndexFileEntry indexFileEntry = iterator.next();
                byte[] key = indexFileEntry.getKey();
                int recordOffset = indexFileEntry.getRecordOffset();
                int recordSize = indexFileEntry.getRecordSize();
                long sequenceNumber = indexFileEntry.getSequenceNumber();
                maxSequenceNumber = Long.max(sequenceNumber, maxSequenceNumber);
                int valueOffset = Utils.getValueOffset(recordOffset, key);
                int valueSize = recordSize - (Record.Header.HEADER_SIZE + key.length);
                count++;
                InMemoryIndexMetaData metaData = new InMemoryIndexMetaData(fileId, valueOffset, valueSize, sequenceNumber);
                if (!inMemoryIndex.putIfAbsent(key, metaData)) {
                    // Another scan task already inserted this key; retry loop because
                    // concurrent tasks may be racing to update the same key.
                    while (true) {
                        InMemoryIndexMetaData existing = inMemoryIndex.get(key);
                        if (existing.getSequenceNumber() >= sequenceNumber) {
                            // stale data, update stale data map.
                            addFileToCompactionQueueIfThresholdCrossed(fileId, recordSize);
                            break;
                        }
                        if (inMemoryIndex.replace(key, existing, metaData)) {
                            // update stale data map for the previous version.
                            addFileToCompactionQueueIfThresholdCrossed(existing.getFileId(), Utils.getRecordSize(key.length, existing.getValueSize()));
                            inserted++;
                            break;
                        }
                    }
                } else {
                    inserted++;
                }
            }
            logger.debug("Completed scanning index file {}. Found {} records, inserted {} records", fileId, count, inserted);
        } finally {
            // Close even when scanning fails, so the file handle is not leaked.
            indexFile.close();
        }
        return maxSequenceNumber;
    }
}
/**
 * Scans a single tombstone file and removes from the in-memory index any key
 * whose tombstone is newer than the indexed version. When
 * options.isCleanUpTombstonesDuringOpen() is set, still-active tombstones are
 * copied to a rollover file and the scanned file is deleted. Returns the
 * highest sequence number seen, or -1 if the file is empty.
 */
class ProcessTombstoneFileTask implements Callable<Long> {
    private final TombstoneFile tombstoneFile;

    public ProcessTombstoneFileTask(TombstoneFile tombstoneFile) {
        this.tombstoneFile = tombstoneFile;
    }

    @Override
    public Long call() throws IOException {
        long maxSequenceNumber = -1;
        tombstoneFile.open();
        TombstoneFile rolloverFile = null;
        long count = 0, active = 0, copied = 0;
        try {
            TombstoneFile.TombstoneFileIterator iterator = tombstoneFile.newIterator();
            while (iterator.hasNext()) {
                TombstoneEntry entry = iterator.next();
                byte[] key = entry.getKey();
                long sequenceNumber = entry.getSequenceNumber();
                maxSequenceNumber = Long.max(sequenceNumber, maxSequenceNumber);
                count++;
                InMemoryIndexMetaData existing = inMemoryIndex.get(key);
                if (existing != null && existing.getSequenceNumber() < sequenceNumber) {
                    // Found a tombstone record which happened after the version currently in index; remove.
                    inMemoryIndex.remove(key);
                    // update stale data map for the previous version.
                    addFileToCompactionQueueIfThresholdCrossed(
                        existing.getFileId(), Utils.getRecordSize(key.length, existing.getValueSize()));
                    active++;
                    if (options.isCleanUpTombstonesDuringOpen()) {
                        rolloverFile = rollOverTombstoneFile(entry, rolloverFile);
                        rolloverFile.write(entry);
                        copied++;
                    }
                }
            }
            logger.debug("Completed scanning tombstone file {}. Found {} tombstones, {} are still active",
                    tombstoneFile.getName(), count, active);
        } finally {
            // Release the file handle even if the scan fails part-way.
            tombstoneFile.close();
        }
        if (options.isCleanUpTombstonesDuringOpen()) {
            logger.debug("Copied {} out of {} tombstones. Deleting {}", copied, count, tombstoneFile.getName());
            if (rolloverFile != null) {
                logger.debug("Closing rollover tombstone file {}", rolloverFile.getName());
                rolloverFile.flushToDisk();
                rolloverFile.close();
            }
            tombstoneFile.delete();
        }
        noOfTombstonesCopiedDuringOpen.addAndGet(copied);
        noOfTombstonesFoundDuringOpen.addAndGet(count);
        return maxSequenceNumber;
    }
}
/** Returns the open data file with the given id, or null if it no longer exists. */
HaloDBFile getHaloDBFile(int fileId) {
    return readFileMap.get(fileId);
}
/**
 * Unregisters the file from the read map and deletes it from disk, along
 * with its stale-data accounting.
 */
void deleteHaloDBFile(int fileId) throws IOException {
    // Remove atomically so concurrent callers cannot both observe the file
    // and attempt a double delete (the original get-then-remove raced).
    HaloDBFile file = readFileMap.remove(fileId);
    if (file != null) {
        file.delete();
    }
    staleDataPerFileMap.remove(fileId);
}
/**
 * If options.isCleanUpTombstonesDuringOpen is set to true, all inactive entries,
 * i.e. physically deleted records, will be dropped during db open.
 * Refer to the ProcessTombstoneFileTask class and buildInMemoryIndex().
 * To shorten db open time, active entries, i.e. not physically deleted
 * records, in each tombstone file are rolled over to a corresponding
 * new tombstone file. Therefore, the new tombstone file size might be very
 * small depending on the number of active entries in each tombstone file.
 * A tombstone file won't be deleted as long as it has at least 1 active
 * entry. This function provides a way to merge small tombstone files in
 * offline mode. options.maxTombstoneFileSize still applies to the merged file.
 */
private void mergeTombstoneFiles() {
    File[] tombStoneFiles = dbDirectory.listTombstoneFiles();
    logger.info("About to merge {} tombstone files ...", tombStoneFiles.length);
    TombstoneFile mergedTombstoneFile = null;
    // Use compaction job rate as write rate limiter to avoid IO impact
    final RateLimiter rateLimiter = RateLimiter.create(options.getCompactionJobRate());
    for (File file : tombStoneFiles) {
        TombstoneFile tombstoneFile = new TombstoneFile(file, options, dbDirectory);
        if (currentTombstoneFile != null && tombstoneFile.getName().equals(currentTombstoneFile.getName())) {
            continue; // not touch current tombstone file
        }
        try {
            tombstoneFile.open();
            TombstoneFile.TombstoneFileIterator iterator = tombstoneFile.newIterator();
            long count = 0;
            while (iterator.hasNext()) {
                TombstoneEntry entry = iterator.next();
                // Throttle by entry size so the merge does not saturate disk IO.
                rateLimiter.acquire(entry.size());
                count++;
                // rollOverTombstoneFile creates the destination file on first use
                // and rolls to a new one when maxTombstoneFileSize is reached.
                mergedTombstoneFile = rollOverTombstoneFile(entry, mergedTombstoneFile);
                mergedTombstoneFile.write(entry);
            }
            if (count > 0) {
                logger.debug("Merged {} tombstones from {} to {}",
                    count, tombstoneFile.getName(), mergedTombstoneFile.getName());
            }
            // Source file is deleted only after all its entries were copied.
            tombstoneFile.close();
            tombstoneFile.delete();
        } catch (IOException e) {
            // Best-effort: a failed merge of one file must not abort the others.
            logger.error("IO exception when merging tombstone file", e);
        }
    }
    if (mergedTombstoneFile != null) {
        try {
            mergedTombstoneFile.close();
        } catch (IOException e) {
            logger.error("IO exception when closing tombstone file: {}", mergedTombstoneFile.getName(), e);
        }
    }
    logger.info("Tombstone files count, before merge:{}, after merge:{}",
        tombStoneFiles.length, dbDirectory.listTombstoneFiles().length);
    isTombstoneFilesMerging = false;
}
/**
 * Repairs the most recent data file, compacted file, and tombstone file.
 * NOTE(review): only the latest file of each type is repaired — presumably
 * because only files open at crash time can be left with a torn tail;
 * confirm against DBMetaData's open/clean-shutdown flag handling.
 */
private void repairFiles() {
    getLatestDataFile(HaloDBFile.FileType.DATA_FILE).ifPresent(file -> {
        try {
            logger.info("Repairing file {}.data", file.getFileId());
            HaloDBFile repairedFile = file.repairFile(dbDirectory);
            // Replace the possibly-corrupt handle with the repaired one.
            readFileMap.put(repairedFile.getFileId(), repairedFile);
        }
        catch (IOException e) {
            throw new RuntimeException("Exception while repairing data file " + file.getFileId() + " which might be corrupted", e);
        }
    });
    getLatestDataFile(HaloDBFile.FileType.COMPACTED_FILE).ifPresent(file -> {
        try {
            logger.info("Repairing file {}.datac", file.getFileId());
            HaloDBFile repairedFile = file.repairFile(dbDirectory);
            readFileMap.put(repairedFile.getFileId(), repairedFile);
        }
        catch (IOException e) {
            throw new RuntimeException("Exception while repairing datac file " + file.getFileId() + " which might be corrupted", e);
        }
    });
    File[] tombstoneFiles = dbDirectory.listTombstoneFiles();
    if (tombstoneFiles != null && tombstoneFiles.length > 0) {
        // Repair only the last (latest) tombstone file.
        TombstoneFile lastFile = new TombstoneFile(tombstoneFiles[tombstoneFiles.length-1], options, dbDirectory);
        try {
            logger.info("Repairing {} file", lastFile.getName());
            lastFile.open();
            TombstoneFile repairedFile = lastFile.repairFile(dbDirectory);
            repairedFile.close();
        } catch (IOException e) {
            throw new RuntimeException("Exception while repairing tombstone file " + lastFile.getName() + " which might be corrupted", e);
        }
    }
}
/**
 * Acquires an exclusive lock on the "LOCK" file in the db directory so that
 * only one process can open the db. The channel backing a successful lock is
 * intentionally left open (closing it would release the lock); on every
 * failure path the channel is closed so the handle is not leaked.
 *
 * @throws HaloDBException if another process holds the lock or an IO error occurs
 */
private FileLock getLock() throws HaloDBException {
    FileChannel channel = null;
    try {
        channel = FileChannel.open(dbDirectory.getPath().resolve("LOCK"), StandardOpenOption.CREATE, StandardOpenOption.WRITE);
        FileLock lock = channel.tryLock();
        if (lock == null) {
            closeLockChannel(channel);
            logger.error("Error while opening db. Another process already holds a lock to this db.");
            throw new HaloDBException("Another process already holds a lock for this db.");
        }
        return lock;
    }
    catch (OverlappingFileLockException e) {
        closeLockChannel(channel);
        logger.error("Error while opening db. Another process already holds a lock to this db.");
        throw new HaloDBException("Another process already holds a lock for this db.");
    }
    catch (IOException e) {
        closeLockChannel(channel);
        logger.error("Error while trying to get a lock on the db.", e);
        throw new HaloDBException("Error while trying to get a lock on the db.", e);
    }
}

/** Best-effort close of the lock-file channel on failure paths; never throws. */
private void closeLockChannel(FileChannel channel) {
    if (channel == null) {
        return;
    }
    try {
        channel.close();
    } catch (IOException e) {
        logger.debug("Failed to close lock file channel", e);
    }
}
/** Returns the directory this db instance operates on. */
DBDirectory getDbDirectory() {
    return dbDirectory;
}

/** Returns a snapshot of the ids of all data files currently open for reading. */
Set<Integer> listDataFileIds() {
    return new HashSet<>(readFileMap.keySet());
}
/**
 * A record copy is "fresh" when the in-memory index still points at that
 * exact copy: same file id and same value offset as the given metadata.
 */
boolean isRecordFresh(byte[] key, InMemoryIndexMetaData metaData) {
    InMemoryIndexMetaData indexed = inMemoryIndex.get(key);
    if (indexed == null) {
        return false;
    }
    return metaData.getFileId() == indexed.getFileId()
        && metaData.getValueOffset() == indexed.getValueOffset();
}
/**
 * Returns the next record sequence number.
 * NOTE(review): the increment is not synchronized here — presumably callers
 * serialize writes externally; confirm before calling from a new thread.
 */
private long getNextSequenceNumber() {
    return nextSequenceNumber++;
}

/** Id of the file the writer is currently appending to, or -1 if none is open. */
private int getCurrentWriteFileId() {
    return currentWriteFile != null ? currentWriteFile.getFileId() : -1;
}
/**
 * Validates option combinations. With the memory pool enabled, the fixed key
 * size must be set and must fit in a single byte (0..127).
 */
private static void checkIfOptionsAreCorrect(HaloDBOptions options) {
    if (!options.isUseMemoryPool()) {
        return;
    }
    int keySize = options.getFixedKeySize();
    if (keySize < 0 || keySize > Byte.MAX_VALUE) {
        throw new IllegalArgumentException("fixedKeySize must be set and should be less than 128 when using memory pool");
    }
}
/** True once close has begun; iterators use this to distinguish shutdown from compaction. */
boolean isClosing() {
    return isClosing;
}
/**
 * Builds a snapshot of db statistics.
 * The arguments are positional and must match the HaloDBStats constructor
 * order exactly — take care when adding new stats.
 */
HaloDBStats stats() {
    OffHeapHashTableStats stats = inMemoryIndex.stats();
    return new HaloDBStats(
        statsResetTime,
        stats.getSize(),
        compactionManager.isCompactionRunning(),
        compactionManager.noOfFilesPendingCompaction(),
        computeStaleDataMapForStats(),
        stats.getRehashCount(),
        inMemoryIndex.getNoOfSegments(),
        inMemoryIndex.getMaxSizeOfEachSegment(),
        stats.getSegmentStats(),
        dbDirectory.listDataFiles().length,
        dbDirectory.listTombstoneFiles().length,
        noOfTombstonesFoundDuringOpen.get(),
        // Tombstones cleaned up = found - copied, but only when cleanup ran.
        options.isCleanUpTombstonesDuringOpen() ?
            noOfTombstonesFoundDuringOpen.get() - noOfTombstonesCopiedDuringOpen.get() : 0,
        compactionManager.getNumberOfRecordsCopied(),
        compactionManager.getNumberOfRecordsReplaced(),
        compactionManager.getNumberOfRecordsScanned(),
        compactionManager.getSizeOfRecordsCopied(),
        compactionManager.getSizeOfFilesDeleted(),
        // Reclaimed bytes = deleted file bytes minus bytes copied out of them.
        compactionManager.getSizeOfFilesDeleted()-compactionManager.getSizeOfRecordsCopied(),
        compactionManager.getCompactionJobRateSinceBeginning(),
        options.clone()
    );
}
/** Resets index and compaction counters and restarts the stats interval clock. */
synchronized void resetStats() {
    inMemoryIndex.resetStats();
    compactionManager.resetStats();
    statsResetTime = System.currentTimeMillis();
}
/**
 * For every file with recorded stale data, computes the stale percentage of
 * the file's total size. Files no longer present in the read map (or with
 * zero size) are skipped.
 */
private Map<Integer, Double> computeStaleDataMapForStats() {
    Map<Integer, Double> stalePercentages = new HashMap<>();
    staleDataPerFileMap.forEach((fileId, staleData) -> {
        HaloDBFile dataFile = readFileMap.get(fileId);
        if (dataFile == null || dataFile.getSize() <= 0) {
            return;
        }
        double stalePercent = (1.0*staleData/dataFile.getSize()) * 100;
        stalePercentages.put(fileId, stalePercent);
    });
    return stalePercentages;
}
// Used only in tests.
@VisibleForTesting
boolean isCompactionComplete() {
    return compactionManager.isCompactionComplete();
}

/** Whether an offline tombstone-file merge is still in progress. */
@VisibleForTesting
boolean isTombstoneFilesMerging() {
    return isTombstoneFilesMerging;
}
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBIterator.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
 * Iterates over all records in the db by scanning each data file's index file
 * and returning only records that are still "fresh" — i.e. the in-memory index
 * still points at that exact copy (see HaloDBInternal.isRecordFresh). Files or
 * records removed by the compaction thread during iteration are skipped.
 * NOTE(review): no synchronization is visible here — presumably a single
 * thread drives one iterator instance; confirm before sharing.
 */
public class HaloDBIterator implements Iterator<Record> {
    private static final Logger logger = LoggerFactory.getLogger(HaloDBIterator.class);
    // Iterates over the data file ids captured at construction time.
    private Iterator<Integer> outer;
    // Iterates over the index entries of the current file; null until first use.
    private Iterator<IndexFileEntry> inner;
    private HaloDBFile currentFile;
    // Record pre-fetched by hasNext(), consumed by next().
    private Record next;
    private final HaloDBInternal dbInternal;

    HaloDBIterator(HaloDBInternal dbInternal) {
        this.dbInternal = dbInternal;
        outer = dbInternal.listDataFileIds().iterator();
    }

    /**
     * Pre-fetches the next fresh record. Returns false when all files are
     * exhausted; IO errors end the iteration rather than propagate.
     */
    @Override
    public boolean hasNext() {
        if (next != null) {
            return true;
        }
        try {
            // inner == null means this is the first time hasNext() is called.
            // use moveToNextFile() to move to the first file.
            if (inner == null && !moveToNextFile()) {
                return false;
            }
            do {
                if (readNextRecord()) {
                    return true;
                }
            } while (moveToNextFile());
            return false;
        } catch (IOException e) {
            logger.error("Error in Iterator", e);
            return false;
        }
    }

    /** Returns the record pre-fetched by hasNext(), or throws if exhausted. */
    @Override
    public Record next() {
        if (hasNext()) {
            Record record = next;
            next = null;
            return record;
        }
        throw new NoSuchElementException();
    }

    /**
     * Advances to the next data file that still exists and whose index file
     * can be opened. Files closed/deleted by compaction are skipped; a closed
     * channel during db shutdown aborts the iteration instead.
     */
    private boolean moveToNextFile() throws IOException {
        while (outer.hasNext()) {
            int fileId = outer.next();
            currentFile = dbInternal.getHaloDBFile(fileId);
            if (currentFile != null) {
                try {
                    inner = currentFile.getIndexFile().newIterator();
                    return true;
                } catch (ClosedChannelException e) {
                    if (dbInternal.isClosing()) {
                        //TODO: define custom Exception classes for HaloDB.
                        throw new RuntimeException("DB is closing");
                    }
                    logger.debug("Index file {} closed, probably by compaction thread. Skipping to next one", fileId);
                }
            }
            logger.debug("Data file {} deleted, probably by compaction thread. Skipping to next one", fileId);
        }
        return false;
    }

    /**
     * Reads entries from the current file until a fresh record is found.
     * Returns false when the file is exhausted or its channel was closed,
     * signalling the caller to move to the next file.
     */
    private boolean readNextRecord() {
        while (inner.hasNext()) {
            IndexFileEntry entry = inner.next();
            try {
                try {
                    next = readRecordFromDataFile(entry);
                    if (next != null) {
                        return true;
                    }
                } catch (ClosedChannelException e) {
                    if (dbInternal.isClosing()) {
                        throw new RuntimeException("DB is closing");
                    }
                    logger.debug("Data file {} closed, probably by compaction thread. Skipping to next one", currentFile.getFileId());
                    break;
                }
            } catch (IOException e) {
                logger.info("Error in iterator", e);
                break;
            }
        }
        return false;
    }

    /**
     * Reads the record value from the data file, but only if the in-memory
     * index still points at this exact copy; returns null for stale copies.
     */
    private Record readRecordFromDataFile(IndexFileEntry entry) throws IOException {
        InMemoryIndexMetaData meta = Utils.getMetaData(entry, currentFile.getFileId());
        Record record = null;
        if (dbInternal.isRecordFresh(entry.getKey(), meta)) {
            byte[] value = currentFile.readFromFile(
                Utils.getValueOffset(entry.getRecordOffset(), entry.getKey()),
                Utils.getValueSize(entry.getRecordSize(), entry.getKey()));
            record = new Record(entry.getKey(), value);
            record.setRecordMetaData(meta);
        }
        return record;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBKeyIterator.java
================================================
package com.oath.halodb;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Iterates over all keys in the db by scanning each data file's index file
 * and returning only keys whose indexed copy is still "fresh" (see
 * HaloDBInternal.isRecordFresh). Files or records removed by the compaction
 * thread during iteration are skipped. Structurally mirrors HaloDBIterator
 * but never reads values from the data files.
 */
public class HaloDBKeyIterator implements Iterator<RecordKey> {
    // Fixed: logger was created with HaloDBIterator.class (copy-paste),
    // which mis-attributed this class's log lines.
    private static final Logger logger = LoggerFactory.getLogger(HaloDBKeyIterator.class);
    // Iterates over the data file ids captured at construction time.
    private Iterator<Integer> outer;
    // Iterates over the index entries of the current file; null until first use.
    private Iterator<IndexFileEntry> inner;
    private HaloDBFile currentFile;
    // Key pre-fetched by hasNext(), consumed by next().
    private RecordKey next;
    private final HaloDBInternal dbInternal;

    HaloDBKeyIterator(HaloDBInternal dbInternal) {
        this.dbInternal = dbInternal;
        outer = dbInternal.listDataFileIds().iterator();
    }

    /**
     * Pre-fetches the next fresh key. Returns false when all files are
     * exhausted; IO errors end the iteration rather than propagate.
     */
    @Override
    public boolean hasNext() {
        if (next != null) {
            return true;
        }
        try {
            // inner == null means this is the first time hasNext() is called.
            // use moveToNextFile() to move to the first file.
            if (inner == null && !moveToNextFile()) {
                return false;
            }
            do {
                if (readNextRecord()) {
                    return true;
                }
            } while (moveToNextFile());
            return false;
        } catch (IOException e) {
            logger.error("Error in Iterator", e);
            return false;
        }
    }

    /** Returns the key pre-fetched by hasNext(), or throws if exhausted. */
    @Override
    public RecordKey next() {
        if (hasNext()) {
            RecordKey key = next;
            next = null;
            return key;
        }
        throw new NoSuchElementException();
    }

    /**
     * Advances to the next data file that still exists and whose index file
     * can be opened. Files closed/deleted by compaction are skipped; a closed
     * channel during db shutdown aborts the iteration instead.
     */
    private boolean moveToNextFile() throws IOException {
        while (outer.hasNext()) {
            int fileId = outer.next();
            currentFile = dbInternal.getHaloDBFile(fileId);
            if (currentFile != null) {
                try {
                    inner = currentFile.getIndexFile().newIterator();
                    return true;
                } catch (ClosedChannelException e) {
                    if (dbInternal.isClosing()) {
                        //TODO: define custom Exception classes for HaloDB.
                        throw new RuntimeException("DB is closing");
                    }
                    logger.debug("Index file {} closed, probably by compaction thread. Skipping to next one", fileId);
                }
            }
            logger.debug("Data file {} deleted, probably by compaction thread. Skipping to next one", fileId);
        }
        return false;
    }

    /**
     * Reads entries from the current file until a fresh key is found. Returns
     * false when the file is exhausted or its channel was closed, signalling
     * the caller to move to the next file.
     */
    private boolean readNextRecord() {
        while (inner.hasNext()) {
            IndexFileEntry entry = inner.next();
            try {
                try {
                    next = readValidRecordKey(entry);
                    if (next != null) {
                        return true;
                    }
                } catch (ClosedChannelException e) {
                    if (dbInternal.isClosing()) {
                        throw new RuntimeException("DB is closing");
                    }
                    logger.debug("Data file {} closed, probably by compaction thread. Skipping to next one", currentFile.getFileId());
                    break;
                }
            } catch (IOException e) {
                logger.info("Error in iterator", e);
                break;
            }
        }
        return false;
    }

    /**
     * Wraps the entry's key in a RecordKey, but only if the in-memory index
     * still points at this exact copy; returns null for stale copies.
     */
    private RecordKey readValidRecordKey(IndexFileEntry entry) throws IOException {
        InMemoryIndexMetaData meta = Utils.getMetaData(entry, currentFile.getFileId());
        RecordKey key = null;
        if (dbInternal.isRecordFresh(entry.getKey(), meta)) {
            key = new RecordKey(entry.getKey());
        }
        return key;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBOptions.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.base.MoreObjects;
/**
 * Configuration for a HaloDB instance. Instances are mutable; HaloDB clones
 * the options on open (see clone()) so later mutation by the client does not
 * affect a running db.
 */
public class HaloDBOptions implements Cloneable {

    // threshold of stale data at which file needs to be compacted.
    private double compactionThresholdPerFile = 0.75;

    private int maxFileSize = 1024 * 1024; /* 1mb file recordSize */

    // To keep backward compatibility, initialize to 0 which means
    // it will fall back to use maxFileSize, see the getter below
    private int maxTombstoneFileSize = 0;

    // Data will be flushed to disk after flushDataSizeBytes have been written.
    // -1 disables explicit flushing and let the kernel handle it.
    private long flushDataSizeBytes = -1;

    // Write call will sync data to disk before returning.
    // If enabled trades off write throughput for durability.
    private boolean syncWrite = false;

    private int numberOfRecords = 1_000_000;

    // MB of data to be compacted per second.
    private int compactionJobRate = 1024 * 1024 * 1024;

    private boolean cleanUpInMemoryIndexOnClose = false;

    private boolean cleanUpTombstonesDuringOpen = false;

    private boolean useMemoryPool = false;

    private int fixedKeySize = Byte.MAX_VALUE;

    private int memoryPoolChunkSize = 16 * 1024 * 1024;

    // Number of threads to scan index and tombstone files
    // to build in-memory index at db open
    private int buildIndexThreads = 1;

    // Just to avoid clients having to deal with CloneNotSupportedException
    @Override
    public HaloDBOptions clone() {
        try {
            return (HaloDBOptions) super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable: this class implements Cloneable.
            throw new AssertionError(e);
        }
    }

    @Override
    public String toString() {
        return MoreObjects.toStringHelper("")
            .add("compactionThresholdPerFile", compactionThresholdPerFile)
            .add("maxFileSize", maxFileSize)
            .add("maxTombstoneFileSize", getMaxTombstoneFileSize())
            .add("flushDataSizeBytes", flushDataSizeBytes)
            .add("syncWrite", syncWrite)
            .add("numberOfRecords", numberOfRecords)
            .add("compactionJobRate", compactionJobRate)
            .add("cleanUpInMemoryIndexOnClose", cleanUpInMemoryIndexOnClose)
            .add("cleanUpTombstonesDuringOpen", cleanUpTombstonesDuringOpen)
            .add("useMemoryPool", useMemoryPool)
            .add("fixedKeySize", fixedKeySize)
            .add("memoryPoolChunkSize", memoryPoolChunkSize)
            .add("buildIndexThreads", buildIndexThreads)
            .toString();
    }

    public void setCompactionThresholdPerFile(double compactionThresholdPerFile) {
        this.compactionThresholdPerFile = compactionThresholdPerFile;
    }

    public void setMaxFileSize(int maxFileSize) {
        if (maxFileSize <= 0) {
            throw new IllegalArgumentException("maxFileSize should be > 0");
        }
        this.maxFileSize = maxFileSize;
    }

    public void setMaxTombstoneFileSize(int maxTombstoneFileSize) {
        // Fixed: the parameter and error message previously said "maxFileSize",
        // which misreported which option was invalid.
        if (maxTombstoneFileSize <= 0) {
            throw new IllegalArgumentException("maxTombstoneFileSize should be > 0");
        }
        this.maxTombstoneFileSize = maxTombstoneFileSize;
    }

    public void setFlushDataSizeBytes(long flushDataSizeBytes) {
        this.flushDataSizeBytes = flushDataSizeBytes;
    }

    public void setNumberOfRecords(int numberOfRecords) {
        this.numberOfRecords = numberOfRecords;
    }

    public void setCompactionJobRate(int compactionJobRate) {
        this.compactionJobRate = compactionJobRate;
    }

    public void setCleanUpInMemoryIndexOnClose(boolean cleanUpInMemoryIndexOnClose) {
        this.cleanUpInMemoryIndexOnClose = cleanUpInMemoryIndexOnClose;
    }

    public double getCompactionThresholdPerFile() {
        return compactionThresholdPerFile;
    }

    public int getMaxFileSize() {
        return maxFileSize;
    }

    /** Falls back to maxFileSize when no tombstone file size was explicitly set. */
    public int getMaxTombstoneFileSize() {
        return maxTombstoneFileSize > 0 ? maxTombstoneFileSize : maxFileSize;
    }

    public long getFlushDataSizeBytes() {
        return flushDataSizeBytes;
    }

    public int getNumberOfRecords() {
        return numberOfRecords;
    }

    public int getCompactionJobRate() {
        return compactionJobRate;
    }

    public boolean isCleanUpInMemoryIndexOnClose() {
        return cleanUpInMemoryIndexOnClose;
    }

    public boolean isCleanUpTombstonesDuringOpen() {
        return cleanUpTombstonesDuringOpen;
    }

    public void setCleanUpTombstonesDuringOpen(boolean cleanUpTombstonesDuringOpen) {
        this.cleanUpTombstonesDuringOpen = cleanUpTombstonesDuringOpen;
    }

    public boolean isUseMemoryPool() {
        return useMemoryPool;
    }

    public void setUseMemoryPool(boolean useMemoryPool) {
        this.useMemoryPool = useMemoryPool;
    }

    public int getFixedKeySize() {
        return fixedKeySize;
    }

    public void setFixedKeySize(int fixedKeySize) {
        this.fixedKeySize = fixedKeySize;
    }

    public int getMemoryPoolChunkSize() {
        return memoryPoolChunkSize;
    }

    public void setMemoryPoolChunkSize(int memoryPoolChunkSize) {
        this.memoryPoolChunkSize = memoryPoolChunkSize;
    }

    public boolean isSyncWrite() {
        return syncWrite;
    }

    public void enableSyncWrites(boolean syncWrites) {
        this.syncWrite = syncWrites;
    }

    public int getBuildIndexThreads() {
        return buildIndexThreads;
    }

    public void setBuildIndexThreads(int buildIndexThreads) {
        int numOfProcessors = Runtime.getRuntime().availableProcessors();
        if (buildIndexThreads <= 0 || buildIndexThreads > numOfProcessors) {
            throw new IllegalArgumentException("buildIndexThreads should be > 0 and <= " + numOfProcessors);
        }
        this.buildIndexThreads = buildIndexThreads;
    }

    // to be used only in tests.
    private boolean isCompactionDisabled = false;

    // not visible to outside the package.
    // to be used only in tests.
    void setCompactionDisabled(boolean compactionDisabled) {
        isCompactionDisabled = compactionDisabled;
    }

    boolean isCompactionDisabled() {
        return isCompactionDisabled;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HaloDBStats.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.base.MoreObjects;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
 * Immutable snapshot of HaloDB runtime statistics, built by
 * HaloDBInternal.stats(). All values are captured at construction time.
 */
public class HaloDBStats {
    // When resetStats() was last called; anchors the per-interval rate below.
    private final long statsResetTime;
    // Number of entries in the in-memory index.
    private final long size;
    private final int numberOfFilesPendingCompaction;
    // file id -> percentage of the file's bytes that are stale.
    private final Map<Integer, Double> staleDataPercentPerFile;
    private final long rehashCount;
    private final long numberOfSegments;
    private final long maxSizePerSegment;
    private final SegmentStats[] segmentStats;
    // Tombstone bookkeeping collected while opening the db.
    private final long numberOfTombstonesFoundDuringOpen;
    private final long numberOfTombstonesCleanedUpDuringOpen;
    private final int numberOfDataFiles;
    private final int numberOfTombstoneFiles;
    // Compaction counters since db open (or last reset).
    private final long numberOfRecordsCopied;
    private final long numberOfRecordsReplaced;
    private final long numberOfRecordsScanned;
    private final long sizeOfRecordsCopied;
    private final long sizeOfFilesDeleted;
    private final long sizeReclaimed;
    // Bytes copied per second since the last stats reset (computed in ctor).
    private final long compactionRateInInternal;
    private final long compactionRateSinceBeginning;
    private final boolean isCompactionRunning;
    private final HaloDBOptions options;
/**
 * Captures a stats snapshot. All values are passed positionally from
 * HaloDBInternal.stats(); the per-interval compaction rate is derived here
 * from sizeOfRecordsCopied over the seconds elapsed since statsResetTime.
 */
public HaloDBStats(long statsResetTime, long size, boolean isCompactionRunning, int numberOfFilesPendingCompaction,
                   Map<Integer, Double> staleDataPercentPerFile, long rehashCount, long numberOfSegments,
                   long maxSizePerSegment, SegmentStats[] segmentStats,
                   int numberOfDataFiles, int numberOfTombstoneFiles,
                   long numberOfTombstonesFoundDuringOpen, long numberOfTombstonesCleanedUpDuringOpen,
                   long numberOfRecordsCopied, long numberOfRecordsReplaced, long numberOfRecordsScanned,
                   long sizeOfRecordsCopied, long sizeOfFilesDeleted, long sizeReclaimed,
                   long compactionRateSinceBeginning, HaloDBOptions options) {
    this.statsResetTime = statsResetTime;
    this.size = size;
    this.numberOfFilesPendingCompaction = numberOfFilesPendingCompaction;
    this.staleDataPercentPerFile = staleDataPercentPerFile;
    this.rehashCount = rehashCount;
    this.numberOfSegments = numberOfSegments;
    this.maxSizePerSegment = maxSizePerSegment;
    this.segmentStats = segmentStats;
    this.numberOfDataFiles = numberOfDataFiles;
    this.numberOfTombstoneFiles = numberOfTombstoneFiles;
    this.numberOfTombstonesFoundDuringOpen = numberOfTombstonesFoundDuringOpen;
    this.numberOfTombstonesCleanedUpDuringOpen = numberOfTombstonesCleanedUpDuringOpen;
    this.numberOfRecordsCopied = numberOfRecordsCopied;
    this.numberOfRecordsReplaced = numberOfRecordsReplaced;
    this.numberOfRecordsScanned = numberOfRecordsScanned;
    this.sizeOfRecordsCopied = sizeOfRecordsCopied;
    this.sizeOfFilesDeleted = sizeOfFilesDeleted;
    this.sizeReclaimed = sizeReclaimed;
    this.compactionRateSinceBeginning = compactionRateSinceBeginning;
    this.isCompactionRunning = isCompactionRunning;
    long intervalTimeInSeconds = (System.currentTimeMillis() - statsResetTime)/1000;
    if (intervalTimeInSeconds > 0) {
        this.compactionRateInInternal = sizeOfRecordsCopied/intervalTimeInSeconds;
    }
    else {
        // Interval shorter than a second: avoid division by zero.
        this.compactionRateInInternal = 0;
    }
    this.options = options;
}
// Simple accessors; all values are immutable snapshots captured at construction.
public long getSize() {
    return size;
}

public int getNumberOfFilesPendingCompaction() {
    return numberOfFilesPendingCompaction;
}

public Map<Integer, Double> getStaleDataPercentPerFile() {
    return staleDataPercentPerFile;
}

public long getRehashCount() {
    return rehashCount;
}

public long getNumberOfSegments() {
    return numberOfSegments;
}

public long getMaxSizePerSegment() {
    return maxSizePerSegment;
}

public long getNumberOfRecordsCopied() {
    return numberOfRecordsCopied;
}

public long getNumberOfRecordsReplaced() {
    return numberOfRecordsReplaced;
}

public long getNumberOfRecordsScanned() {
    return numberOfRecordsScanned;
}

public long getSizeOfRecordsCopied() {
    return sizeOfRecordsCopied;
}

public long getSizeOfFilesDeleted() {
    return sizeOfFilesDeleted;
}

public long getSizeReclaimed() {
    return sizeReclaimed;
}

public HaloDBOptions getOptions() {
    return options;
}

public int getNumberOfDataFiles() {
    return numberOfDataFiles;
}

public int getNumberOfTombstoneFiles() {
    return numberOfTombstoneFiles;
}

public long getNumberOfTombstonesFoundDuringOpen() {
    return numberOfTombstonesFoundDuringOpen;
}

public long getNumberOfTombstonesCleanedUpDuringOpen() {
    return numberOfTombstonesCleanedUpDuringOpen;
}

public SegmentStats[] getSegmentStats() {
    return segmentStats;
}

public long getCompactionRateInInternal() {
    return compactionRateInInternal;
}

public long getCompactionRateSinceBeginning() {
    return compactionRateSinceBeginning;
}

public boolean isCompactionRunning() {
    return isCompactionRunning;
}
/** Human-readable dump of every stat; rates are scaled via getUnit(...). */
@Override
public String toString() {
    return MoreObjects.toStringHelper("")
        .add("statsResetTime", statsResetTime)
        .add("size", size)
        .add("Options", options)
        .add("isCompactionRunning", isCompactionRunning)
        .add("CompactionJobRateInInterval", getUnit(compactionRateInInternal))
        .add("CompactionJobRateSinceBeginning", getUnit(compactionRateSinceBeginning))
        .add("numberOfFilesPendingCompaction", numberOfFilesPendingCompaction)
        .add("numberOfRecordsCopied", numberOfRecordsCopied)
        .add("numberOfRecordsReplaced", numberOfRecordsReplaced)
        .add("numberOfRecordsScanned", numberOfRecordsScanned)
        .add("sizeOfRecordsCopied", sizeOfRecordsCopied)
        .add("sizeOfFilesDeleted", sizeOfFilesDeleted)
        .add("sizeReclaimed", sizeReclaimed)
        .add("rehashCount", rehashCount)
        .add("maxSizePerSegment", maxSizePerSegment)
        .add("numberOfDataFiles", numberOfDataFiles)
        .add("numberOfTombstoneFiles", numberOfTombstoneFiles)
        .add("numberOfTombstonesFoundDuringOpen", numberOfTombstonesFoundDuringOpen)
        .add("numberOfTombstonesCleanedUpDuringOpen", numberOfTombstonesCleanedUpDuringOpen)
        .add("segmentStats", Arrays.toString(segmentStats))
        .add("numberOfSegments", numberOfSegments)
        .add("staleDataPercentPerFile", staleDataMapToString())
        .toString();
}
/**
 * Returns the stats as a mutable map of stat name to stringified value, with the
 * same entries (same keys, same values) as {@link #toString()}.
 *
 * @return a new {@link HashMap} holding every stat of this snapshot
 */
public Map<String, String> toStringMap() {
    Map<String, String> map = new HashMap<>();
    map.put("statsResetTime", String.valueOf(statsResetTime));
    map.put("size", String.valueOf(size));
    map.put("Options", String.valueOf(options));
    map.put("isCompactionRunning", String.valueOf(isCompactionRunning));
    // getUnit(), staleDataMapToString() and Arrays.toString() already return
    // non-null Strings, so the previous String.valueOf(...) wrappers were redundant.
    map.put("CompactionJobRateInInterval", getUnit(compactionRateInInternal));
    map.put("CompactionJobRateSinceBeginning", getUnit(compactionRateSinceBeginning));
    map.put("numberOfFilesPendingCompaction", String.valueOf(numberOfFilesPendingCompaction));
    map.put("numberOfRecordsCopied", String.valueOf(numberOfRecordsCopied));
    map.put("numberOfRecordsReplaced", String.valueOf(numberOfRecordsReplaced));
    map.put("numberOfRecordsScanned", String.valueOf(numberOfRecordsScanned));
    map.put("sizeOfRecordsCopied", String.valueOf(sizeOfRecordsCopied));
    map.put("sizeOfFilesDeleted", String.valueOf(sizeOfFilesDeleted));
    map.put("sizeReclaimed", String.valueOf(sizeReclaimed));
    map.put("rehashCount", String.valueOf(rehashCount));
    map.put("maxSizePerSegment", String.valueOf(maxSizePerSegment));
    map.put("numberOfDataFiles", String.valueOf(numberOfDataFiles));
    map.put("numberOfTombstoneFiles", String.valueOf(numberOfTombstoneFiles));
    map.put("numberOfTombstonesFoundDuringOpen", String.valueOf(numberOfTombstonesFoundDuringOpen));
    map.put("numberOfTombstonesCleanedUpDuringOpen", String.valueOf(numberOfTombstonesCleanedUpDuringOpen));
    map.put("segmentStats", Arrays.toString(segmentStats));
    map.put("numberOfSegments", String.valueOf(numberOfSegments));
    map.put("staleDataPercentPerFile", staleDataMapToString());
    return map;
}
/**
 * Renders the per-file stale-data map as "[{fileId=pct}, {fileId=pct}, ...]",
 * with each percentage formatted to one decimal place.
 */
private String staleDataMapToString() {
    StringBuilder rendered = new StringBuilder("[");
    String separator = "";
    for (Map.Entry<Integer, Double> entry : staleDataPercentPerFile.entrySet()) {
        rendered.append(separator)
                .append("{")
                .append(entry.getKey())
                .append("=")
                .append(String.format("%.1f", entry.getValue()))
                .append("}");
        separator = ", ";
    }
    return rendered.append("]").toString();
}
// Unit suffixes appended by getUnit() when formatting rates.
private static final String gbRateUnit = " GB/second";
private static final String mbRateUnit = " MB/second";
private static final String kbRateUnit = " KB/second";
// Binary (1024-based) unit sizes in bytes. 1024*1024*1024 = 2^30, which still
// fits in an int, so the int multiplication cannot overflow before the value
// is widened to long.
private static final long GB = 1024 * 1024 * 1024;
private static final long MB = 1024 * 1024;
private static final long KB = 1024;
/**
 * Formats a byte rate using the largest whole binary unit that fits:
 * GB/second if the value is at least 1 GB, then MB/second, otherwise
 * KB/second (integer division, so sub-KB rates render as "0 KB/second").
 */
private String getUnit(long value) {
    long inGb = value / GB;
    if (inGb >= 1) {
        return inGb + gbRateUnit;
    }
    long inMb = value / MB;
    if (inMb >= 1) {
        return inMb + mbRateUnit;
    }
    return (value / KB) + kbRateUnit;
}
}
================================================
FILE: src/main/java/com/oath/halodb/HashAlgorithm.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
// This code is a derivative work heavily modified from the OHC project. See NOTICE file for copyright and license.
package com.oath.halodb;
/**
 * Hash algorithms the off-heap hash table can use for hashing keys.
 * {@code Hasher.create} maps each constant to a nested implementation class;
 * XX falls back to CRC32 when its implementation class cannot be loaded.
 */
enum HashAlgorithm {
MURMUR3,
CRC32,
XX
}
================================================
FILE: src/main/java/com/oath/halodb/HashTableUtil.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
// This code is a derivative work heavily modified from the OHC project. See NOTICE file for copyright and license.
package com.oath.halodb;
/**
 * Static helpers shared by the off-heap hash table implementations: bucket-slot
 * sizes, entry allocation size, and bit/power-of-two arithmetic.
 */
final class HashTableUtil {

    // Hash bucket-table: size in bytes of one slot in each table flavor.
    static final long NON_MEMORY_POOL_BUCKET_ENTRY_LEN = 8;
    static final long MEMORY_POOL_BUCKET_ENTRY_LEN = 5;

    private HashTableUtil() {
        // utility class; no instances
    }

    /** Total bytes needed for a non-memory-pool entry: fixed header plus key plus value. */
    static long allocLen(long keyLen, long valueLen) {
        return NonMemoryPoolHashEntries.ENTRY_OFF_DATA + keyLen + valueLen;
    }

    /**
     * Number of significant bits in {@code val}: 0 for 0, otherwise the position of
     * the highest set bit plus one (64 for negative values). Equivalent to the
     * original shift-and-count loop, but branch-free via the intrinsic.
     */
    static int bitNum(long val) {
        return Long.SIZE - Long.numberOfLeadingZeros(val);
    }

    /**
     * Rounds {@code number} up to the next power of two, capped at {@code max}
     * (returned as-is, even if max itself is not a power of two); returns 1 for
     * inputs of 1 or less.
     */
    static long roundUpToPowerOf2(long number, long max) {
        return number >= max
               ? max
               : (number > 1) ? Long.highestOneBit((number - 1) << 1) : 1;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/HashTableValueSerializer.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
// This code is a derivative work heavily modified from the OHC project. See NOTICE file for copyright and license.
package com.oath.halodb;
import java.nio.ByteBuffer;
/**
* Serialize and deserialize cached data using {@link ByteBuffer}
*/
interface HashTableValueSerializer<T> {
/** Writes {@code value} into {@code buf}. */
void serialize(T value, ByteBuffer buf);
/** Reads one value back from {@code buf}. */
T deserialize(ByteBuffer buf);
/** Number of bytes {@link #serialize} will write for {@code value}. */
int serializedSize(T value);
}
================================================
FILE: src/main/java/com/oath/halodb/Hasher.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
// This code is a derivative work heavily modified from the OHC project. See NOTICE file for copyright and license.
package com.oath.halodb;
import net.jpountz.xxhash.XXHashFactory;
import java.util.zip.CRC32;
/**
 * Pluggable 64-bit hash functions used by the off-heap hash table, able to hash
 * a key either from an on-heap byte[] or directly from native memory
 * (address + offset + length). Implementations are the nested classes
 * Crc32Hash, Murmur3Hash and XxHash, selected via {@link #create}.
 */
abstract class Hasher {
// Instantiates the implementation for the given algorithm reflectively, using the
// "<Hasher>$<Alg>Hash" naming convention built by forAlg(). If the XX class cannot
// be found (e.g. the xxhash dependency is absent), falls back to CRC32.
static Hasher create(HashAlgorithm hashAlgorithm) {
String cls = forAlg(hashAlgorithm);
try {
return (Hasher) Class.forName(cls).newInstance();
} catch (ClassNotFoundException e) {
if (hashAlgorithm == HashAlgorithm.XX) {
// xxHash implementation unavailable; degrade to CRC32.
cls = forAlg(HashAlgorithm.CRC32);
try {
return (Hasher) Class.forName(cls).newInstance();
} catch (InstantiationException | ClassNotFoundException | IllegalAccessException e1) {
throw new RuntimeException(e1);
}
}
throw new RuntimeException(e);
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}
// Builds the nested-class name for an algorithm, e.g. XX -> "...Hasher$XxHash".
private static String forAlg(HashAlgorithm hashAlgorithm) {
return Hasher.class.getName()
+ '$'
+ hashAlgorithm.name().substring(0, 1)
+ hashAlgorithm.name().substring(1).toLowerCase()
+ "Hash";
}
/** Hashes the whole byte array. */
abstract long hash(byte[] array);
/** Hashes {@code length} bytes of native memory starting at {@code address} + {@code offset}. */
abstract long hash(long address, long offset, int length);
// CRC32-backed hasher. CRC32 yields only 32 bits, so the value is duplicated
// into the upper 32 bits to fill the 64-bit hash.
static final class Crc32Hash extends Hasher {
long hash(byte[] array) {
CRC32 crc = new CRC32();
crc.update(array);
long h = crc.getValue();
h |= h << 32;
return h;
}
long hash(long address, long offset, int length) {
// native-memory CRC32 is delegated to Uns (off-heap access helper)
return Uns.crc32(address, offset, length);
}
}
// MurmurHash3-style mixing (x64, 128-bit state h1/h2) that returns only h1,
// i.e. the first 64 bits of the 128-bit result (see the commented-out
// "h2 += h1" and "padToLong()" markers below).
static final class Murmur3Hash extends Hasher {
long hash(byte[] array) {
// o = current offset into the array, r = bytes remaining
int o = 0;
int r = array.length;
long h1 = 0L;
long h2 = 0L;
long k1, k2;
// main body: consume 16 bytes (two little-endian longs) per iteration
for (; r >= 16; r -= 16) {
k1 = getLong(array, o);
o += 8;
k2 = getLong(array, o);
o += 8;
// bmix64()
h1 ^= mixK1(k1);
h1 = Long.rotateLeft(h1, 27);
h1 += h2;
h1 = h1 * 5 + 0x52dce729;
h2 ^= mixK2(k2);
h2 = Long.rotateLeft(h2, 31);
h2 += h1;
h2 = h2 * 5 + 0x38495ab5;
}
// tail: fold the remaining 1..15 bytes, little-endian, via fall-through
if (r > 0) {
k1 = 0;
k2 = 0;
switch (r) {
case 15:
k2 ^= toLong(array[o + 14]) << 48; // fall through
case 14:
k2 ^= toLong(array[o + 13]) << 40; // fall through
case 13:
k2 ^= toLong(array[o + 12]) << 32; // fall through
case 12:
k2 ^= toLong(array[o + 11]) << 24; // fall through
case 11:
k2 ^= toLong(array[o + 10]) << 16; // fall through
case 10:
k2 ^= toLong(array[o + 9]) << 8; // fall through
case 9:
k2 ^= toLong(array[o + 8]); // fall through
case 8:
k1 ^= getLong(array, o);
break;
case 7:
k1 ^= toLong(array[o + 6]) << 48; // fall through
case 6:
k1 ^= toLong(array[o + 5]) << 40; // fall through
case 5:
k1 ^= toLong(array[o + 4]) << 32; // fall through
case 4:
k1 ^= toLong(array[o + 3]) << 24; // fall through
case 3:
k1 ^= toLong(array[o + 2]) << 16; // fall through
case 2:
k1 ^= toLong(array[o + 1]) << 8; // fall through
case 1:
k1 ^= toLong(array[o]);
break;
default:
throw new AssertionError("Should never get here.");
}
h1 ^= mixK1(k1);
h2 ^= mixK2(k2);
}
// makeHash()
h1 ^= array.length;
h2 ^= array.length;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
//h2 += h1;
// padToLong()
return h1;
}
// Reads 8 bytes from the array at offset o as a little-endian long.
private static long getLong(byte[] array, int o) {
long l = toLong(array[o + 7]) << 56;
l |= toLong(array[o + 6]) << 48;
l |= toLong(array[o + 5]) << 40;
l |= toLong(array[o + 4]) << 32;
l |= toLong(array[o + 3]) << 24;
l |= toLong(array[o + 2]) << 16;
l |= toLong(array[o + 1]) << 8;
l |= toLong(array[o]);
return l;
}
// Native-memory variant; mirrors hash(byte[]) exactly but reads bytes via Uns.
long hash(long adr, long offset, int length) {
long o = offset;
long r = length;
long h1 = 0L;
long h2 = 0L;
long k1, k2;
for (; r >= 16; r -= 16) {
k1 = getLong(adr, o);
o += 8;
k2 = getLong(adr, o);
o += 8;
// bmix64()
h1 ^= mixK1(k1);
h1 = Long.rotateLeft(h1, 27);
h1 += h2;
h1 = h1 * 5 + 0x52dce729;
h2 ^= mixK2(k2);
h2 = Long.rotateLeft(h2, 31);
h2 += h1;
h2 = h2 * 5 + 0x38495ab5;
}
if (r > 0) {
k1 = 0;
k2 = 0;
switch ((int) r) {
case 15:
k2 ^= toLong(Uns.getByte(adr, o + 14)) << 48; // fall through
case 14:
k2 ^= toLong(Uns.getByte(adr, o + 13)) << 40; // fall through
case 13:
k2 ^= toLong(Uns.getByte(adr, o + 12)) << 32; // fall through
case 12:
k2 ^= toLong(Uns.getByte(adr, o + 11)) << 24; // fall through
case 11:
k2 ^= toLong(Uns.getByte(adr, o + 10)) << 16; // fall through
case 10:
k2 ^= toLong(Uns.getByte(adr, o + 9)) << 8; // fall through
case 9:
k2 ^= toLong(Uns.getByte(adr, o + 8)); // fall through
case 8:
k1 ^= getLong(adr, o);
break;
case 7:
k1 ^= toLong(Uns.getByte(adr, o + 6)) << 48; // fall through
case 6:
k1 ^= toLong(Uns.getByte(adr, o + 5)) << 40; // fall through
case 5:
k1 ^= toLong(Uns.getByte(adr, o + 4)) << 32; // fall through
case 4:
k1 ^= toLong(Uns.getByte(adr, o + 3)) << 24; // fall through
case 3:
k1 ^= toLong(Uns.getByte(adr, o + 2)) << 16; // fall through
case 2:
k1 ^= toLong(Uns.getByte(adr, o + 1)) << 8; // fall through
case 1:
k1 ^= toLong(Uns.getByte(adr, o));
break;
default:
throw new AssertionError("Should never get here.");
}
h1 ^= mixK1(k1);
h2 ^= mixK2(k2);
}
// makeHash()
h1 ^= length;
h2 ^= length;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
//h2 += h1;
// padToLong()
return h1;
}
// Reads 8 bytes of native memory at adr+o as a little-endian long.
private static long getLong(long adr, long o) {
long l = toLong(Uns.getByte(adr, o + 7)) << 56;
l |= toLong(Uns.getByte(adr, o + 6)) << 48;
l |= toLong(Uns.getByte(adr, o + 5)) << 40;
l |= toLong(Uns.getByte(adr, o + 4)) << 32;
l |= toLong(Uns.getByte(adr, o + 3)) << 24;
l |= toLong(Uns.getByte(adr, o + 2)) << 16;
l |= toLong(Uns.getByte(adr, o + 1)) << 8;
l |= toLong(Uns.getByte(adr, o));
return l;
}
// MurmurHash3 x64 mixing constants.
static final long C1 = 0x87c37b91114253d5L;
static final long C2 = 0x4cf5ad432745937fL;
// Final avalanche: spreads entropy across all 64 bits.
static long fmix64(long k) {
k ^= k >>> 33;
k *= 0xff51afd7ed558ccdL;
k ^= k >>> 33;
k *= 0xc4ceb9fe1a85ec53L;
k ^= k >>> 33;
return k;
}
static long mixK1(long k1) {
k1 *= C1;
k1 = Long.rotateLeft(k1, 31);
k1 *= C2;
return k1;
}
static long mixK2(long k2) {
k2 *= C2;
k2 = Long.rotateLeft(k2, 33);
k2 *= C1;
return k2;
}
// Widens a byte to long as an unsigned value (0..255).
static long toLong(byte value) {
return value & 0xff;
}
}
// xxHash64 (via the lz4-java/jpountz library) with seed 0. This is the class
// whose absence triggers the CRC32 fallback in create().
static final class XxHash extends Hasher {
private static final XXHashFactory xx = XXHashFactory.fastestInstance();
long hash(long address, long offset, int length) {
return xx.hash64().hash(Uns.directBufferFor(address, offset, length, true), 0);
}
long hash(byte[] array) {
return xx.hash64().hash(array, 0, array.length, 0);
}
}
}
================================================
FILE: src/main/java/com/oath/halodb/InMemoryIndex.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import com.google.common.primitives.Ints;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* Hash table stored in native memory, outside Java heap.
*/
/**
 * Hash table stored in native memory, outside Java heap.
 *
 * Thin wrapper around an {@link OffHeapHashTable} that maps record keys to
 * {@link InMemoryIndexMetaData}. The table is split into a power-of-two number
 * of segments sized from the expected number of keys.
 */
class InMemoryIndex {

    private static final Logger logger = LoggerFactory.getLogger(InMemoryIndex.class);

    private final OffHeapHashTable<InMemoryIndexMetaData> offHeapHashTable;
    private final int noOfSegments;
    private final int maxSizeOfEachSegment;

    /**
     * Allocates the off-heap table.
     *
     * @param numberOfKeys        expected number of keys, used to size each segment
     * @param useMemoryPool       whether to back entries with pooled memory chunks
     * @param fixedKeySize        fixed key size, used only when the memory pool is enabled
     * @param memoryPoolChunkSize chunk size, used only when the memory pool is enabled
     */
    InMemoryIndex(int numberOfKeys, boolean useMemoryPool, int fixedKeySize, int memoryPoolChunkSize) {
        // Two segments per available processor, rounded up to a power of two.
        noOfSegments = Ints.checkedCast(Utils.roundUpToPowerOf2(Runtime.getRuntime().availableProcessors() * 2));
        maxSizeOfEachSegment = Ints.checkedCast(Utils.roundUpToPowerOf2(numberOfKeys / noOfSegments));
        long start = System.currentTimeMillis();
        OffHeapHashTableBuilder<InMemoryIndexMetaData> builder =
            OffHeapHashTableBuilder.<InMemoryIndexMetaData>newBuilder()
                .valueSerializer(new InMemoryIndexMetaDataSerializer())
                .segmentCount(noOfSegments)
                .hashTableSize(maxSizeOfEachSegment)
                .fixedValueSize(InMemoryIndexMetaData.SERIALIZED_SIZE)
                .loadFactor(1);
        if (useMemoryPool) {
            builder.useMemoryPool(true).fixedKeySize(fixedKeySize).memoryPoolChunkSize(memoryPoolChunkSize);
        }
        this.offHeapHashTable = builder.build();
        logger.debug("Allocated memory for the index in {}", (System.currentTimeMillis() - start));
    }

    /** Inserts or overwrites the metadata for {@code key}. */
    boolean put(byte[] key, InMemoryIndexMetaData metaData) {
        return offHeapHashTable.put(key, metaData);
    }

    /** Inserts only if {@code key} is not already present. */
    boolean putIfAbsent(byte[] key, InMemoryIndexMetaData metaData) {
        return offHeapHashTable.putIfAbsent(key, metaData);
    }

    /** Removes the entry for {@code key}, if any. */
    boolean remove(byte[] key) {
        return offHeapHashTable.remove(key);
    }

    // Delegates to addOrReplace; presumably replaces only when the current value
    // matches oldValue — TODO(review): confirm against OffHeapHashTable's contract.
    boolean replace(byte[] key, InMemoryIndexMetaData oldValue, InMemoryIndexMetaData newValue) {
        return offHeapHashTable.addOrReplace(key, oldValue, newValue);
    }

    /** Returns the metadata for {@code key}, or null-equivalent per the table's contract. */
    InMemoryIndexMetaData get(byte[] key) {
        return offHeapHashTable.get(key);
    }

    boolean containsKey(byte[] key) {
        return offHeapHashTable.containsKey(key);
    }

    /** Releases the native memory backing the table. */
    void close() {
        try {
            offHeapHashTable.close();
        } catch (IOException e) {
            // Previously e.printStackTrace(); report through the class logger instead
            // so the failure reaches the normal logging pipeline with its cause.
            logger.error("Error while closing the in-memory index", e);
        }
    }

    /** Current number of entries in the index. */
    long size() {
        return offHeapHashTable.size();
    }

    public OffHeapHashTableStats stats() {
        return offHeapHashTable.stats();
    }

    void resetStats() {
        offHeapHashTable.resetStatistics();
    }

    int getNoOfSegments() {
        return noOfSegments;
    }

    int getMaxSizeOfEachSegment() {
        return maxSizeOfEachSegment;
    }
}
================================================
FILE: src/main/java/com/oath/halodb/InMemoryIndexMetaData.java
================================================
/*
* Copyright 2018, Oath Inc
* Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICENSE file for terms.
*/
package com.oath.halodb;
import java.nio.ByteBuffer;
/**
* Metadata stored in the in-memory
gitextract_fajmuc1o/
├── .github/
│ └── workflows/
│ ├── maven-publish.yml
│ └── maven.yml
├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── CONTRIBUTORS.md
├── Code-of-Conduct.md
├── LICENSE
├── NOTICE
├── README.md
├── benchmarks/
│ ├── README.md
│ ├── pom.xml
│ └── src/
│ └── main/
│ └── java/
│ └── com/
│ └── oath/
│ └── halodb/
│ └── benchmarks/
│ ├── BenchmarkTool.java
│ ├── Benchmarks.java
│ ├── HaloDBStorageEngine.java
│ ├── KyotoStorageEngine.java
│ ├── RandomDataGenerator.java
│ ├── RocksDBStorageEngine.java
│ └── StorageEngine.java
├── docs/
│ ├── WhyHaloDB.md
│ └── benchmarks.md
├── pom.xml
└── src/
├── main/
│ └── java/
│ └── com/
│ └── oath/
│ └── halodb/
│ ├── CompactionManager.java
│ ├── Constants.java
│ ├── DBDirectory.java
│ ├── DBMetaData.java
│ ├── FileUtils.java
│ ├── HaloDB.java
│ ├── HaloDBException.java
│ ├── HaloDBFile.java
│ ├── HaloDBInternal.java
│ ├── HaloDBIterator.java
│ ├── HaloDBKeyIterator.java
│ ├── HaloDBOptions.java
│ ├── HaloDBStats.java
│ ├── HashAlgorithm.java
│ ├── HashTableUtil.java
│ ├── HashTableValueSerializer.java
│ ├── Hasher.java
│ ├── InMemoryIndex.java
│ ├── InMemoryIndexMetaData.java
│ ├── InMemoryIndexMetaDataSerializer.java
│ ├── IndexFile.java
│ ├── IndexFileEntry.java
│ ├── JNANativeAllocator.java
│ ├── KeyBuffer.java
│ ├── LongArrayList.java
│ ├── MemoryPoolAddress.java
│ ├── MemoryPoolChunk.java
│ ├── MemoryPoolHashEntries.java
│ ├── NativeMemoryAllocator.java
│ ├── NonMemoryPoolHashEntries.java
│ ├── OffHeapHashTable.java
│ ├── OffHeapHashTableBuilder.java
│ ├── OffHeapHashTableImpl.java
│ ├── OffHeapHashTableStats.java
│ ├── Record.java
│ ├── RecordKey.java
│ ├── Segment.java
│ ├── SegmentNonMemoryPool.java
│ ├── SegmentStats.java
│ ├── SegmentWithMemoryPool.java
│ ├── TombstoneEntry.java
│ ├── TombstoneFile.java
│ ├── Uns.java
│ ├── UnsExt.java
│ ├── UnsExt8.java
│ ├── UnsafeAllocator.java
│ ├── Utils.java
│ ├── Versions.java
│ └── histo/
│ └── EstimatedHistogram.java
└── test/
├── java/
│ └── com/
│ └── oath/
│ └── halodb/
│ ├── CheckOffHeapHashTable.java
│ ├── CheckSegment.java
│ ├── CompactionWithErrorsTest.java
│ ├── CrossCheckTest.java
│ ├── DBDirectoryTest.java
│ ├── DBMetaDataTest.java
│ ├── DBRepairTest.java
│ ├── DataConsistencyDB.java
│ ├── DataConsistencyTest.java
│ ├── DoubleCheckOffHeapHashTableImpl.java
│ ├── FileUtilsTest.java
│ ├── HaloDBCompactionTest.java
│ ├── HaloDBDeletionTest.java
│ ├── HaloDBFileCompactionTest.java
│ ├── HaloDBFileTest.java
│ ├── HaloDBIteratorTest.java
│ ├── HaloDBKeyIteratorTest.java
│ ├── HaloDBOptionsTest.java
│ ├── HaloDBStatsTest.java
│ ├── HaloDBTest.java
│ ├── HashTableTestUtils.java
│ ├── HashTableUtilTest.java
│ ├── HashTableValueSerializerTest.java
│ ├── HasherTest.java
│ ├── IndexFileEntryTest.java
│ ├── KeyBufferTest.java
│ ├── LinkedImplTest.java
│ ├── LongArrayListTest.java
│ ├── MemoryPoolChunkTest.java
│ ├── NonMemoryPoolHashEntriesTest.java
│ ├── OffHeapHashTableBuilderTest.java
│ ├── RandomDataGenerator.java
│ ├── RecordTest.java
│ ├── RehashTest.java
│ ├── SegmentWithMemoryPoolTest.java
│ ├── SequenceNumberTest.java
│ ├── SyncWriteTest.java
│ ├── TestBase.java
│ ├── TestListener.java
│ ├── TestUtils.java
│ ├── TombstoneFileCleanUpTest.java
│ ├── TombstoneFileTest.java
│ ├── UnsTest.java
│ └── histo/
│ └── EstimatedHistogramTest.java
└── resources/
└── log4j2-test.xml
SYMBOL INDEX (1172 symbols across 100 files)
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/BenchmarkTool.java
class BenchmarkTool (line 20) | public class BenchmarkTool {
method main (line 43) | public static void main(String[] args) throws Exception {
method createDB (line 78) | private static void createDB(StorageEngine db, boolean isSequential) {
method update (line 102) | private static void update(StorageEngine db) {
method readRandom (line 127) | private static void readRandom(StorageEngine db, int threads) {
method updateWithReads (line 162) | private static void updateWithReads(StorageEngine db) {
class Read (line 239) | static class Read extends Thread {
method Read (line 247) | Read(StorageEngine db, int id) {
method run (line 253) | @Override
method longToBytes (line 282) | public static byte[] longToBytes(long value) {
method printDate (line 286) | public static String printDate() {
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/Benchmarks.java
type Benchmarks (line 8) | public enum Benchmarks {
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/HaloDBStorageEngine.java
class HaloDBStorageEngine (line 14) | public class HaloDBStorageEngine implements StorageEngine {
method HaloDBStorageEngine (line 21) | public HaloDBStorageEngine(File dbDirectory, long noOfRecords) {
method put (line 26) | @Override
method get (line 36) | @Override
method delete (line 47) | @Override
method open (line 56) | @Override
method close (line 74) | @Override
method size (line 86) | @Override
method printStats (line 91) | @Override
method stats (line 96) | @Override
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/KyotoStorageEngine.java
class KyotoStorageEngine (line 11) | public class KyotoStorageEngine implements StorageEngine {
method KyotoStorageEngine (line 18) | public KyotoStorageEngine(File dbDirectory, int noOfRecords) {
method open (line 23) | @Override
method put (line 46) | @Override
method get (line 51) | @Override
method close (line 56) | @Override
method size (line 61) | @Override
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/RandomDataGenerator.java
class RandomDataGenerator (line 9) | public class RandomDataGenerator {
method RandomDataGenerator (line 15) | public RandomDataGenerator(int seed) {
method getData (line 21) | public byte[] getData(int length) {
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/RocksDBStorageEngine.java
class RocksDBStorageEngine (line 19) | public class RocksDBStorageEngine implements StorageEngine {
method RocksDBStorageEngine (line 28) | public RocksDBStorageEngine(File dbDirectory, int noOfRecords) {
method put (line 32) | @Override
method get (line 42) | @Override
method open (line 54) | @Override
method close (line 121) | @Override
FILE: benchmarks/src/main/java/com/oath/halodb/benchmarks/StorageEngine.java
type StorageEngine (line 7) | public interface StorageEngine {
method put (line 9) | void put(byte[] key, byte[] value);
method stats (line 11) | default String stats() { return "";}
method get (line 13) | byte[] get(byte[] key);
method delete (line 15) | default void delete(byte[] key) {}
method open (line 17) | void open();
method close (line 19) | void close();
method size (line 21) | default long size() {return 0;}
method printStats (line 23) | default void printStats() {
FILE: src/main/java/com/oath/halodb/CompactionManager.java
class CompactionManager (line 20) | class CompactionManager {
method CompactionManager (line 49) | CompactionManager(HaloDBInternal dbInternal) {
method stopCompactionThread (line 56) | boolean stopCompactionThread(boolean closeCurrentWriteFile) throws IOE...
method startCompactionThread (line 85) | void startCompactionThread() {
method pauseCompactionThread (line 98) | void pauseCompactionThread() throws IOException {
method resumeCompaction (line 103) | void resumeCompaction() {
method getCurrentWriteFileId (line 108) | int getCurrentWriteFileId() {
method submitFileForCompaction (line 112) | boolean submitFileForCompaction(int fileId) {
method noOfFilesPendingCompaction (line 116) | int noOfFilesPendingCompaction() {
method getNumberOfRecordsCopied (line 120) | long getNumberOfRecordsCopied() {
method getNumberOfRecordsReplaced (line 124) | long getNumberOfRecordsReplaced() {
method getNumberOfRecordsScanned (line 128) | long getNumberOfRecordsScanned() {
method getSizeOfRecordsCopied (line 132) | long getSizeOfRecordsCopied() {
method getSizeOfFilesDeleted (line 136) | long getSizeOfFilesDeleted() {
method getCompactionJobRateSinceBeginning (line 140) | long getCompactionJobRateSinceBeginning() {
method resetStats (line 149) | void resetStats() {
method isCompactionRunning (line 154) | boolean isCompactionRunning() {
class CompactionThread (line 158) | private class CompactionThread extends Thread {
method CompactionThread (line 162) | CompactionThread() {
method run (line 193) | @Override
method copyFreshRecordsToNewFile (line 221) | private void copyFreshRecordsToNewFile(int idOfFileToCompact) throws...
method isRecordFresh (line 302) | private boolean isRecordFresh(IndexFileEntry entry, InMemoryIndexMet...
method rollOverCurrentWriteFile (line 308) | private void rollOverCurrentWriteFile(int recordSize) throws IOExcep...
method forceRolloverCurrentWriteFile (line 316) | void forceRolloverCurrentWriteFile() throws IOException {
method isCompactionComplete (line 327) | @VisibleForTesting
FILE: src/main/java/com/oath/halodb/Constants.java
class Constants (line 10) | class Constants {
FILE: src/main/java/com/oath/halodb/DBDirectory.java
class DBDirectory (line 18) | class DBDirectory {
method DBDirectory (line 23) | private DBDirectory(File dbDirectory, FileChannel directoryChannel) {
method open (line 31) | static DBDirectory open(File directory) throws IOException {
method close (line 46) | void close() throws IOException {
method getPath (line 52) | Path getPath() {
method listDataFiles (line 56) | File[] listDataFiles() {
method listIndexFiles (line 60) | List<Integer> listIndexFiles() {
method listTombstoneFiles (line 64) | File[] listTombstoneFiles() {
method syncMetaData (line 68) | void syncMetaData() throws IOException {
method openReadOnlyChannel (line 84) | private static FileChannel openReadOnlyChannel(File dbDirectory) throw...
method isWindows (line 88) | private static boolean isWindows() {
FILE: src/main/java/com/oath/halodb/DBMetaData.java
class DBMetaData (line 22) | class DBMetaData {
method DBMetaData (line 49) | DBMetaData(DBDirectory dbDirectory) {
method loadFromFileIfExists (line 53) | void loadFromFileIfExists() throws IOException {
method storeToFile (line 72) | void storeToFile() throws IOException {
method computeCheckSum (line 97) | private long computeCheckSum(byte[] header) {
method isValid (line 103) | boolean isValid() {
method isOpen (line 115) | boolean isOpen() {
method setOpen (line 119) | void setOpen(boolean open) {
method getSequenceNumber (line 123) | long getSequenceNumber() {
method setSequenceNumber (line 127) | void setSequenceNumber(long sequenceNumber) {
method isIOError (line 131) | boolean isIOError() {
method setIOError (line 135) | void setIOError(boolean ioError) {
method getVersion (line 139) | public int getVersion() {
method setVersion (line 143) | public void setVersion(int version) {
method getMaxFileSize (line 147) | public int getMaxFileSize() {
method setMaxFileSize (line 151) | public void setMaxFileSize(int maxFileSize) {
FILE: src/main/java/com/oath/halodb/FileUtils.java
class FileUtils (line 22) | class FileUtils {
method createDirectoryIfNotExists (line 24) | static void createDirectoryIfNotExists(File directory) throws IOExcept...
method deleteDirectory (line 37) | static void deleteDirectory(File dir) throws IOException {
method listIndexFiles (line 52) | static List<Integer> listIndexFiles(File directory) {
method listTombstoneFiles (line 67) | static File[] listTombstoneFiles(File directory) {
method listDataFiles (line 80) | static File[] listDataFiles(File directory) {
method getFileId (line 84) | private static int getFileId(File file, Pattern pattern) {
FILE: src/main/java/com/oath/halodb/HaloDB.java
class HaloDB (line 14) | public final class HaloDB {
method open (line 20) | public static HaloDB open(File dirname, HaloDBOptions opts) throws Hal...
method open (line 31) | public static HaloDB open(String directory, HaloDBOptions opts) throws...
method get (line 35) | public byte[] get(byte[] key) throws HaloDBException {
method put (line 43) | public boolean put(byte[] key, byte[] value) throws HaloDBException {
method delete (line 51) | public void delete(byte[] key) throws HaloDBException {
method close (line 59) | public void close() throws HaloDBException {
method size (line 67) | public long size() {
method stats (line 71) | public HaloDBStats stats() {
method resetStats (line 75) | public void resetStats() {
method newIterator (line 79) | public HaloDBIterator newIterator() throws HaloDBException {
method newKeyIterator (line 83) | public HaloDBKeyIterator newKeyIterator() {
method pauseCompaction (line 87) | public void pauseCompaction() throws HaloDBException {
method snapshot (line 95) | public boolean snapshot() {
method clearSnapshot (line 99) | public boolean clearSnapshot() {
method getSnapshotDirectory (line 103) | public File getSnapshotDirectory() {
method resumeCompaction (line 107) | public void resumeCompaction() {
method isCompactionComplete (line 113) | @VisibleForTesting
method isTombstoneFilesMerging (line 118) | @VisibleForTesting
FILE: src/main/java/com/oath/halodb/HaloDBException.java
class HaloDBException (line 7) | public class HaloDBException extends Exception {
method HaloDBException (line 10) | public HaloDBException(String message) {
method HaloDBException (line 14) | public HaloDBException(String message, Throwable cause) {
method HaloDBException (line 18) | public HaloDBException(Throwable cause) {
FILE: src/main/java/com/oath/halodb/HaloDBFile.java
class HaloDBFile (line 30) | class HaloDBFile {
method HaloDBFile (line 51) | private HaloDBFile(int fileId, File backingFile, DBDirectory dbDirecto...
method readFromFile (line 63) | byte[] readFromFile(int offset, int length) throws IOException {
method readFromFile (line 72) | int readFromFile(long position, ByteBuffer destinationBuffer) throws I...
method readRecord (line 83) | private Record readRecord(int offset) throws HaloDBException, IOExcept...
method writeRecord (line 113) | InMemoryIndexMetaData writeRecord(Record record) throws IOException {
method rebuildIndexFile (line 131) | void rebuildIndexFile() throws IOException {
method repairFile (line 158) | HaloDBFile repairFile(DBDirectory dbDirectory) throws IOException {
method createRepairFile (line 187) | private HaloDBFile createRepairFile() throws IOException {
method writeToChannel (line 200) | private long writeToChannel(ByteBuffer[] buffers) throws IOException {
method flushToDisk (line 220) | void flushToDisk() throws IOException {
method getWriteOffset (line 225) | long getWriteOffset() {
method setWriteOffset (line 229) | void setWriteOffset(int writeOffset) {
method getSize (line 233) | long getSize() {
method getIndexFile (line 237) | IndexFile getIndexFile() {
method getChannel (line 241) | FileChannel getChannel() {
method getFileType (line 245) | FileType getFileType() {
method getFileId (line 249) | int getFileId() {
method openForReading (line 253) | static HaloDBFile openForReading(DBDirectory dbDirectory, File filenam...
method create (line 262) | static HaloDBFile create(DBDirectory dbDirectory, int fileId, HaloDBOp...
method newIterator (line 282) | HaloDBFileIterator newIterator() throws IOException {
method close (line 286) | void close() throws IOException {
method delete (line 295) | void delete() throws IOException {
method getName (line 304) | String getName() {
method getPath (line 308) | Path getPath() {
method getDataFile (line 312) | private static File getDataFile(DBDirectory dbDirectory, int fileId) {
method getCompactedDataFile (line 316) | private static File getCompactedDataFile(DBDirectory dbDirectory, int ...
method findFileType (line 320) | static FileType findFileType(File file) {
method getFileTimeStamp (line 325) | static int getFileTimeStamp(File file) {
class HaloDBFileIterator (line 337) | class HaloDBFileIterator implements Iterator<Record> {
method HaloDBFileIterator (line 342) | HaloDBFileIterator() throws IOException {
method hasNext (line 346) | @Override
method next (line 351) | @Override
type FileType (line 368) | enum FileType {
FILE: src/main/java/com/oath/halodb/HaloDBInternal.java
class HaloDBInternal (line 41) | class HaloDBInternal {
method HaloDBInternal (line 82) | private HaloDBInternal() {}
method open (line 84) | static HaloDBInternal open(File directory, HaloDBOptions options) thro...
method close (line 160) | synchronized void close() throws IOException {
method put (line 218) | boolean put(byte[] key, byte[] value) throws IOException, HaloDBExcept...
method get (line 240) | byte[] get(byte[] key, int attemptNumber) throws IOException, HaloDBEx...
method get (line 270) | int get(byte[] key, ByteBuffer buffer) throws IOException {
method takeSnapshot (line 302) | synchronized boolean takeSnapshot() {
method getSnapshotDirectory (line 369) | File getSnapshotDirectory() {
method clearSnapshot (line 374) | boolean clearSnapshot() {
method delete (line 391) | void delete(byte[] key) throws IOException {
method size (line 409) | long size() {
method setIOErrorFlag (line 413) | void setIOErrorFlag() throws IOException {
method pauseCompaction (line 420) | void pauseCompaction() throws IOException {
method resumeCompaction (line 424) | void resumeCompaction() {
method writeRecordToFile (line 428) | private InMemoryIndexMetaData writeRecordToFile(Record record) throws ...
method rollOverCurrentWriteFile (line 433) | private void rollOverCurrentWriteFile(Record record) throws IOException {
method forceRollOverCurrentWriteFile (line 441) | private void forceRollOverCurrentWriteFile() throws IOException {
method rollOverTombstoneFile (line 450) | private TombstoneFile rollOverTombstoneFile(TombstoneEntry entry, Tomb...
method forceRollOverTombstoneFile (line 460) | private TombstoneFile forceRollOverTombstoneFile(TombstoneFile tombsto...
method markPreviousVersionAsStale (line 472) | private void markPreviousVersionAsStale(byte[] key) {
method markPreviousVersionAsStale (line 479) | private void markPreviousVersionAsStale(byte[] key, InMemoryIndexMetaD...
method addFileToCompactionQueueIfThresholdCrossed (line 484) | void addFileToCompactionQueueIfThresholdCrossed(int fileId, int staleR...
method updateStaleDataMap (line 501) | private int updateStaleDataMap(int fileId, int staleDataSize) {
method markFileAsCompacted (line 505) | void markFileAsCompacted(int fileId) {
method getInMemoryIndex (line 509) | InMemoryIndex getInMemoryIndex() {
method createHaloDBFile (line 513) | HaloDBFile createHaloDBFile(HaloDBFile.FileType fileType) throws IOExc...
method openDataFilesForReading (line 521) | private List<HaloDBFile> openDataFilesForReading() throws IOException {
method buildReadFileMap (line 538) | private int buildReadFileMap() throws HaloDBException, IOException {
method getNextFileId (line 556) | private int getNextFileId() {
method getLatestDataFile (line 560) | private Optional<HaloDBFile> getLatestDataFile(HaloDBFile.FileType fil...
method buildInMemoryIndex (line 567) | private long buildInMemoryIndex() throws IOException {
method buildInMemoryIndex (line 580) | private long buildInMemoryIndex(ExecutorService executor) throws IOExc...
class ProcessIndexFileTask (line 632) | class ProcessIndexFileTask implements Callable<Long> {
method ProcessIndexFileTask (line 636) | public ProcessIndexFileTask(IndexFile indexFile, int fileId) {
method call (line 641) | @Override
class ProcessTombstoneFileTask (line 688) | class ProcessTombstoneFileTask implements Callable<Long> {
method ProcessTombstoneFileTask (line 691) | public ProcessTombstoneFileTask(TombstoneFile tombstoneFile) {
method call (line 695) | @Override
method getHaloDBFile (line 749) | HaloDBFile getHaloDBFile(int fileId) {
method deleteHaloDBFile (line 753) | void deleteHaloDBFile(int fileId) throws IOException {
method mergeTombstoneFiles (line 776) | private void mergeTombstoneFiles() {
method repairFiles (line 826) | private void repairFiles() {
method getLock (line 862) | private FileLock getLock() throws HaloDBException {
method getDbDirectory (line 882) | DBDirectory getDbDirectory() {
method listDataFileIds (line 886) | Set<Integer> listDataFileIds() {
method isRecordFresh (line 890) | boolean isRecordFresh(byte[] key, InMemoryIndexMetaData metaData) {
method getNextSequenceNumber (line 901) | private long getNextSequenceNumber() {
method getCurrentWriteFileId (line 905) | private int getCurrentWriteFileId() {
method checkIfOptionsAreCorrect (line 909) | private static void checkIfOptionsAreCorrect(HaloDBOptions options) {
method isClosing (line 915) | boolean isClosing() {
method stats (line 919) | HaloDBStats stats() {
method resetStats (line 947) | synchronized void resetStats() {
method computeStaleDataMapForStats (line 953) | private Map<Integer, Double> computeStaleDataMapForStats() {
method isCompactionComplete (line 967) | @VisibleForTesting
method isTombstoneFilesMerging (line 972) | @VisibleForTesting
FILE: src/main/java/com/oath/halodb/HaloDBIterator.java
class HaloDBIterator (line 16) | public class HaloDBIterator implements Iterator<Record> {
method HaloDBIterator (line 27) | HaloDBIterator(HaloDBInternal dbInternal) {
method hasNext (line 32) | @Override
method next (line 59) | @Override
method moveToNextFile (line 69) | private boolean moveToNextFile() throws IOException {
method readNextRecord (line 91) | private boolean readNextRecord() {
method readRecordFromDataFile (line 115) | private Record readRecordFromDataFile(IndexFileEntry entry) throws IOE...
FILE: src/main/java/com/oath/halodb/HaloDBKeyIterator.java
class HaloDBKeyIterator (line 9) | public class HaloDBKeyIterator implements Iterator<RecordKey>{
method HaloDBKeyIterator (line 20) | HaloDBKeyIterator(HaloDBInternal dbInternal) {
method hasNext (line 25) | @Override
method next (line 52) | @Override
method moveToNextFile (line 62) | private boolean moveToNextFile() throws IOException {
method readNextRecord (line 84) | private boolean readNextRecord() {
method readValidRecordKey (line 108) | private RecordKey readValidRecordKey(IndexFileEntry entry) throws IOEx...
FILE: src/main/java/com/oath/halodb/HaloDBOptions.java
class HaloDBOptions (line 10) | public class HaloDBOptions implements Cloneable {
method clone (line 49) | public HaloDBOptions clone() {
method toString (line 57) | @Override
method setCompactionThresholdPerFile (line 76) | public void setCompactionThresholdPerFile(double compactionThresholdPe...
method setMaxFileSize (line 80) | public void setMaxFileSize(int maxFileSize) {
method setMaxTombstoneFileSize (line 87) | public void setMaxTombstoneFileSize(int maxFileSize) {
method setFlushDataSizeBytes (line 94) | public void setFlushDataSizeBytes(long flushDataSizeBytes) {
method setNumberOfRecords (line 98) | public void setNumberOfRecords(int numberOfRecords) {
method setCompactionJobRate (line 102) | public void setCompactionJobRate(int compactionJobRate) {
method setCleanUpInMemoryIndexOnClose (line 106) | public void setCleanUpInMemoryIndexOnClose(boolean cleanUpInMemoryInde...
method getCompactionThresholdPerFile (line 110) | public double getCompactionThresholdPerFile() {
method getMaxFileSize (line 114) | public int getMaxFileSize() {
method getMaxTombstoneFileSize (line 118) | public int getMaxTombstoneFileSize() {
method getFlushDataSizeBytes (line 122) | public long getFlushDataSizeBytes() {
method getNumberOfRecords (line 126) | public int getNumberOfRecords() {
method getCompactionJobRate (line 130) | public int getCompactionJobRate() {
method isCleanUpInMemoryIndexOnClose (line 134) | public boolean isCleanUpInMemoryIndexOnClose() {
method isCleanUpTombstonesDuringOpen (line 138) | public boolean isCleanUpTombstonesDuringOpen() {
method setCleanUpTombstonesDuringOpen (line 142) | public void setCleanUpTombstonesDuringOpen(boolean cleanUpTombstonesDu...
method isUseMemoryPool (line 146) | public boolean isUseMemoryPool() {
method setUseMemoryPool (line 150) | public void setUseMemoryPool(boolean useMemoryPool) {
method getFixedKeySize (line 154) | public int getFixedKeySize() {
method setFixedKeySize (line 158) | public void setFixedKeySize(int fixedKeySize) {
method getMemoryPoolChunkSize (line 162) | public int getMemoryPoolChunkSize() {
method setMemoryPoolChunkSize (line 166) | public void setMemoryPoolChunkSize(int memoryPoolChunkSize) {
method isSyncWrite (line 170) | public boolean isSyncWrite() {
method enableSyncWrites (line 174) | public void enableSyncWrites(boolean syncWrites) {
method getBuildIndexThreads (line 178) | public int getBuildIndexThreads() {
method setBuildIndexThreads (line 182) | public void setBuildIndexThreads(int buildIndexThreads) {
method setCompactionDisabled (line 195) | void setCompactionDisabled(boolean compactionDisabled) {
method isCompactionDisabled (line 198) | boolean isCompactionDisabled() {
FILE: src/main/java/com/oath/halodb/HaloDBStats.java
class HaloDBStats (line 14) | public class HaloDBStats {
method HaloDBStats (line 48) | public HaloDBStats(long statsResetTime, long size, boolean isCompactio...
method getSize (line 88) | public long getSize() {
method getNumberOfFilesPendingCompaction (line 92) | public int getNumberOfFilesPendingCompaction() {
method getStaleDataPercentPerFile (line 96) | public Map<Integer, Double> getStaleDataPercentPerFile() {
method getRehashCount (line 100) | public long getRehashCount() {
method getNumberOfSegments (line 104) | public long getNumberOfSegments() {
method getMaxSizePerSegment (line 108) | public long getMaxSizePerSegment() {
method getNumberOfRecordsCopied (line 112) | public long getNumberOfRecordsCopied() {
method getNumberOfRecordsReplaced (line 116) | public long getNumberOfRecordsReplaced() {
method getNumberOfRecordsScanned (line 120) | public long getNumberOfRecordsScanned() {
method getSizeOfRecordsCopied (line 124) | public long getSizeOfRecordsCopied() {
method getSizeOfFilesDeleted (line 128) | public long getSizeOfFilesDeleted() {
method getSizeReclaimed (line 132) | public long getSizeReclaimed() {
method getOptions (line 136) | public HaloDBOptions getOptions() {
method getNumberOfDataFiles (line 140) | public int getNumberOfDataFiles() {
method getNumberOfTombstoneFiles (line 144) | public int getNumberOfTombstoneFiles() {
method getNumberOfTombstonesFoundDuringOpen (line 148) | public long getNumberOfTombstonesFoundDuringOpen() {
method getNumberOfTombstonesCleanedUpDuringOpen (line 152) | public long getNumberOfTombstonesCleanedUpDuringOpen() {
method getSegmentStats (line 156) | public SegmentStats[] getSegmentStats() {
method getCompactionRateInInternal (line 160) | public long getCompactionRateInInternal() {
method getCompactionRateSinceBeginning (line 164) | public long getCompactionRateSinceBeginning() {
method isCompactionRunning (line 168) | public boolean isCompactionRunning() {
method toString (line 172) | @Override
method toStringMap (line 200) | public Map<String, String> toStringMap() {
method staleDataMapToString (line 228) | private String staleDataMapToString() {
method getUnit (line 255) | private String getUnit(long value) {
FILE: src/main/java/com/oath/halodb/HashAlgorithm.java
type HashAlgorithm (line 10) | enum HashAlgorithm {
FILE: src/main/java/com/oath/halodb/HashTableUtil.java
class HashTableUtil (line 10) | final class HashTableUtil {
method allocLen (line 17) | static long allocLen(long keyLen, long valueLen) {
method bitNum (line 21) | static int bitNum(long val) {
method roundUpToPowerOf2 (line 29) | static long roundUpToPowerOf2(long number, long max) {
FILE: src/main/java/com/oath/halodb/HashTableValueSerializer.java
type HashTableValueSerializer (line 15) | interface HashTableValueSerializer<T> {
method serialize (line 17) | void serialize(T value, ByteBuffer buf);
method deserialize (line 19) | T deserialize(ByteBuffer buf);
method serializedSize (line 21) | int serializedSize(T value);
FILE: src/main/java/com/oath/halodb/Hasher.java
class Hasher (line 14) | abstract class Hasher {
method create (line 16) | static Hasher create(HashAlgorithm hashAlgorithm) {
method forAlg (line 35) | private static String forAlg(HashAlgorithm hashAlgorithm) {
method hash (line 43) | abstract long hash(byte[] array);
method hash (line 45) | abstract long hash(long address, long offset, int length);
class Crc32Hash (line 47) | static final class Crc32Hash extends Hasher {
method hash (line 49) | long hash(byte[] array) {
method hash (line 57) | long hash(long address, long offset, int length) {
class Murmur3Hash (line 62) | static final class Murmur3Hash extends Hasher {
method hash (line 64) | long hash(byte[] array) {
method getLong (line 155) | private static long getLong(byte[] array, int o) {
method hash (line 167) | long hash(long adr, long offset, int length) {
method getLong (line 259) | private static long getLong(long adr, long o) {
method fmix64 (line 274) | static long fmix64(long k) {
method mixK1 (line 283) | static long mixK1(long k1) {
method mixK2 (line 290) | static long mixK2(long k2) {
method toLong (line 297) | static long toLong(byte value) {
class XxHash (line 302) | static final class XxHash extends Hasher {
method hash (line 306) | long hash(long address, long offset, int length) {
method hash (line 310) | long hash(byte[] array) {
FILE: src/main/java/com/oath/halodb/InMemoryIndex.java
class InMemoryIndex (line 18) | class InMemoryIndex {
method InMemoryIndex (line 26) | InMemoryIndex(int numberOfKeys, boolean useMemoryPool, int fixedKeySiz...
method put (line 47) | boolean put(byte[] key, InMemoryIndexMetaData metaData) {
method putIfAbsent (line 51) | boolean putIfAbsent(byte[] key, InMemoryIndexMetaData metaData) {
method remove (line 55) | boolean remove(byte[] key) {
method replace (line 59) | boolean replace(byte[] key, InMemoryIndexMetaData oldValue, InMemoryIn...
method get (line 63) | InMemoryIndexMetaData get(byte[] key) {
method containsKey (line 67) | boolean containsKey(byte[] key) {
method close (line 71) | void close() {
method size (line 79) | long size() {
method stats (line 83) | public OffHeapHashTableStats stats() {
method resetStats (line 87) | void resetStats() {
method getNoOfSegments (line 91) | int getNoOfSegments() {
method getMaxSizeOfEachSegment (line 95) | int getMaxSizeOfEachSegment() {
FILE: src/main/java/com/oath/halodb/InMemoryIndexMetaData.java
class InMemoryIndexMetaData (line 14) | class InMemoryIndexMetaData {
method InMemoryIndexMetaData (line 23) | InMemoryIndexMetaData(int fileId, int valueOffset, int valueSize, long...
method serialize (line 30) | void serialize(ByteBuffer byteBuffer) {
method deserialize (line 38) | static InMemoryIndexMetaData deserialize(ByteBuffer byteBuffer) {
method getFileId (line 47) | int getFileId() {
method getValueOffset (line 51) | int getValueOffset() {
method getValueSize (line 55) | int getValueSize() {
method getSequenceNumber (line 59) | long getSequenceNumber() {
FILE: src/main/java/com/oath/halodb/InMemoryIndexMetaDataSerializer.java
class InMemoryIndexMetaDataSerializer (line 10) | class InMemoryIndexMetaDataSerializer implements HashTableValueSerialize...
method serialize (line 12) | public void serialize(InMemoryIndexMetaData recordMetaData, ByteBuffer...
method deserialize (line 17) | public InMemoryIndexMetaData deserialize(ByteBuffer byteBuffer) {
method serializedSize (line 21) | public int serializedSize(InMemoryIndexMetaData recordMetaData) {
FILE: src/main/java/com/oath/halodb/IndexFile.java
class IndexFile (line 21) | class IndexFile {
method IndexFile (line 37) | IndexFile(int fileId, DBDirectory dbDirectory, HaloDBOptions options) {
method create (line 43) | void create() throws IOException {
method createRepairFile (line 51) | void createRepairFile() throws IOException {
method open (line 60) | void open() throws IOException {
method close (line 65) | void close() throws IOException {
method delete (line 71) | void delete() throws IOException {
method write (line 78) | void write(IndexFileEntry entry) throws IOException {
method flushToDisk (line 98) | void flushToDisk() throws IOException {
method newIterator (line 103) | IndexFileIterator newIterator() throws IOException {
method getPath (line 107) | Path getPath() {
method getIndexFile (line 111) | private File getIndexFile() {
method getRepairFile (line 115) | private File getRepairFile() {
class IndexFileIterator (line 119) | public class IndexFileIterator implements Iterator<IndexFileEntry> {
method IndexFileIterator (line 125) | public IndexFileIterator() throws IOException {
method hasNext (line 129) | @Override
method next (line 134) | @Override
FILE: src/main/java/com/oath/halodb/IndexFileEntry.java
class IndexFileEntry (line 14) | class IndexFileEntry {
method IndexFileEntry (line 43) | IndexFileEntry(byte[] key, int recordSize, int recordOffset, long sequ...
method serialize (line 54) | ByteBuffer[] serialize() {
method deserialize (line 69) | static IndexFileEntry deserialize(ByteBuffer buffer) {
method deserializeIfNotCorrupted (line 83) | static IndexFileEntry deserializeIfNotCorrupted(ByteBuffer buffer) {
method computeCheckSum (line 112) | private long computeCheckSum(byte[] header) {
method computeCheckSum (line 119) | long computeCheckSum() {
method getKey (line 129) | byte[] getKey() {
method getRecordSize (line 133) | int getRecordSize() {
method getRecordOffset (line 137) | int getRecordOffset() {
method getSequenceNumber (line 141) | long getSequenceNumber() {
method getVersion (line 145) | int getVersion() {
method getCheckSum (line 149) | long getCheckSum() {
FILE: src/main/java/com/oath/halodb/JNANativeAllocator.java
class JNANativeAllocator (line 12) | final class JNANativeAllocator implements NativeMemoryAllocator {
method allocate (line 14) | public long allocate(long size) {
method free (line 22) | public void free(long peer) {
method getTotalAllocated (line 26) | public long getTotalAllocated() {
FILE: src/main/java/com/oath/halodb/KeyBuffer.java
class KeyBuffer (line 12) | final class KeyBuffer {
method KeyBuffer (line 17) | KeyBuffer(byte[] buffer) {
method hash (line 21) | long hash() {
method finish (line 25) | KeyBuffer finish(Hasher hasher) {
method equals (line 31) | public boolean equals(Object o) {
method size (line 44) | public int size() {
method hashCode (line 48) | public int hashCode() {
method pad (line 52) | private static String pad(int val) {
method toString (line 60) | @Override
method sameKey (line 76) | boolean sameKey(long hashEntryAdr) {
method compareKey (line 81) | private boolean compareKey(long hashEntryAdr) {
FILE: src/main/java/com/oath/halodb/LongArrayList.java
class LongArrayList (line 12) | final class LongArrayList {
method LongArrayList (line 17) | public LongArrayList() {
method LongArrayList (line 21) | public LongArrayList(int initialCapacity) {
method getLong (line 25) | public long getLong(int i) {
method clear (line 32) | public void clear() {
method size (line 36) | public int size() {
method add (line 40) | public void add(long value) {
FILE: src/main/java/com/oath/halodb/MemoryPoolAddress.java
class MemoryPoolAddress (line 12) | class MemoryPoolAddress {
method MemoryPoolAddress (line 17) | MemoryPoolAddress(byte chunkIndex, int chunkOffset) {
method equals (line 22) | @Override
method hashCode (line 34) | @Override
FILE: src/main/java/com/oath/halodb/MemoryPoolChunk.java
class MemoryPoolChunk (line 15) | class MemoryPoolChunk {
method MemoryPoolChunk (line 24) | private MemoryPoolChunk(long address, int chunkSize, int fixedKeyLengt...
method create (line 32) | static MemoryPoolChunk create(int chunkSize, int fixedKeyLength, int f...
method destroy (line 41) | void destroy() {
method getNextAddress (line 45) | MemoryPoolAddress getNextAddress(int slotOffset) {
method setNextAddress (line 52) | void setNextAddress(int slotOffset, MemoryPoolAddress next) {
method fillNextSlot (line 60) | void fillNextSlot(byte[] key, byte[] value, MemoryPoolAddress nextAddr...
method fillSlot (line 68) | void fillSlot(int slotOffset, byte[] key, byte[] value, MemoryPoolAddr...
method setValue (line 88) | void setValue(byte[] value, int slotOffset) {
method getWriteOffset (line 99) | int getWriteOffset() {
method remaining (line 103) | int remaining() {
method readOnlyValueByteBuffer (line 107) | ByteBuffer readOnlyValueByteBuffer(int offset) {
method readOnlyKeyByteBuffer (line 111) | ByteBuffer readOnlyKeyByteBuffer(int offset) {
method computeHash (line 115) | long computeHash(int slotOffset, Hasher hasher) {
method compareKey (line 120) | boolean compareKey(int slotOffset, byte[] key) {
method compareValue (line 128) | boolean compareValue(int slotOffset, byte[] value) {
method compare (line 136) | private boolean compare(int offset, byte[] array) {
method getKeyLength (line 162) | private byte getKeyLength(int slotOffset) {
FILE: src/main/java/com/oath/halodb/MemoryPoolHashEntries.java
class MemoryPoolHashEntries (line 8) | class MemoryPoolHashEntries {
FILE: src/main/java/com/oath/halodb/NativeMemoryAllocator.java
type NativeMemoryAllocator (line 10) | interface NativeMemoryAllocator {
method allocate (line 12) | long allocate(long size);
method free (line 13) | void free(long peer);
method getTotalAllocated (line 14) | long getTotalAllocated();
FILE: src/main/java/com/oath/halodb/NonMemoryPoolHashEntries.java
class NonMemoryPoolHashEntries (line 13) | final class NonMemoryPoolHashEntries {
method init (line 24) | static void init(int keyLen, long hashEntryAdr) {
method getNext (line 29) | static long getNext(long hashEntryAdr) {
method setNext (line 33) | static void setNext(long hashEntryAdr, long nextAdr) {
method getKeyLen (line 42) | static int getKeyLen(long hashEntryAdr) {
FILE: src/main/java/com/oath/halodb/OffHeapHashTable.java
type OffHeapHashTable (line 14) | interface OffHeapHashTable<V> extends Closeable {
method put (line 21) | boolean put(byte[] key, V value);
method addOrReplace (line 32) | boolean addOrReplace(byte[] key, V old, V value);
method putIfAbsent (line 39) | boolean putIfAbsent(byte[] key, V value);
method remove (line 47) | boolean remove(byte[] key);
method clear (line 52) | void clear();
method get (line 60) | V get(byte[] key);
method containsKey (line 69) | boolean containsKey(byte[] key);
method resetStatistics (line 73) | void resetStatistics();
method size (line 75) | long size();
method hashTableSizes (line 77) | int[] hashTableSizes();
method perSegmentStats (line 79) | SegmentStats[] perSegmentStats();
method getBucketHistogram (line 81) | EstimatedHistogram getBucketHistogram();
method segments (line 83) | int segments();
method loadFactor (line 85) | float loadFactor();
method stats (line 87) | OffHeapHashTableStats stats();
FILE: src/main/java/com/oath/halodb/OffHeapHashTableBuilder.java
class OffHeapHashTableBuilder (line 10) | class OffHeapHashTableBuilder<V> {
method OffHeapHashTableBuilder (line 24) | private OffHeapHashTableBuilder() {
method roundUpToPowerOf2 (line 32) | static int roundUpToPowerOf2(int number, int max) {
method newBuilder (line 38) | static <V> OffHeapHashTableBuilder<V> newBuilder() {
method build (line 42) | public OffHeapHashTable<V> build() {
method getHashTableSize (line 59) | public int getHashTableSize() {
method hashTableSize (line 63) | public OffHeapHashTableBuilder<V> hashTableSize(int hashTableSize) {
method getMemoryPoolChunkSize (line 71) | public int getMemoryPoolChunkSize() {
method memoryPoolChunkSize (line 75) | public OffHeapHashTableBuilder<V> memoryPoolChunkSize(int chunkSize) {
method getValueSerializer (line 83) | public HashTableValueSerializer<V> getValueSerializer() {
method valueSerializer (line 87) | public OffHeapHashTableBuilder<V> valueSerializer(HashTableValueSerial...
method getSegmentCount (line 92) | public int getSegmentCount() {
method segmentCount (line 96) | public OffHeapHashTableBuilder<V> segmentCount(int segmentCount) {
method getLoadFactor (line 104) | public float getLoadFactor() {
method loadFactor (line 108) | public OffHeapHashTableBuilder<V> loadFactor(float loadFactor) {
method getFixedKeySize (line 116) | public int getFixedKeySize() {
method fixedKeySize (line 120) | public OffHeapHashTableBuilder<V> fixedKeySize(int fixedKeySize) {
method getFixedValueSize (line 128) | public int getFixedValueSize() {
method fixedValueSize (line 132) | public OffHeapHashTableBuilder<V> fixedValueSize(int fixedValueSize) {
method getHashAlgorighm (line 140) | public HashAlgorithm getHashAlgorighm() {
method getHasher (line 144) | public Hasher getHasher() {
method hashMode (line 148) | public OffHeapHashTableBuilder<V> hashMode(HashAlgorithm hashMode) {
method isUnlocked (line 157) | public boolean isUnlocked() {
method unlocked (line 161) | public OffHeapHashTableBuilder<V> unlocked(boolean unlocked) {
method isUseMemoryPool (line 166) | public boolean isUseMemoryPool() {
method useMemoryPool (line 170) | public OffHeapHashTableBuilder<V> useMemoryPool(boolean useMemoryPool) {
FILE: src/main/java/com/oath/halodb/OffHeapHashTableImpl.java
class OffHeapHashTableImpl (line 21) | final class OffHeapHashTableImpl<V> implements OffHeapHashTable<V> {
method OffHeapHashTableImpl (line 41) | OffHeapHashTableImpl(OffHeapHashTableBuilder<V> builder) {
method allocateSegment (line 77) | private Segment<V> allocateSegment(OffHeapHashTableBuilder<V> builder) {
method get (line 84) | public V get(byte[] key) {
method containsKey (line 93) | public boolean containsKey(byte[] key) {
method put (line 102) | public boolean put(byte[] k, V v) {
method addOrReplace (line 106) | public boolean addOrReplace(byte[] key, V old, V value) {
method putIfAbsent (line 110) | public boolean putIfAbsent(byte[] k, V v) {
method putInternal (line 114) | private boolean putInternal(byte[] key, V value, boolean ifAbsent, V o...
method valueSize (line 136) | private int valueSize(V v) {
method remove (line 144) | public boolean remove(byte[] k) {
method segment (line 153) | private Segment<V> segment(long hash) {
method keySource (line 158) | private KeyBuffer keySource(byte[] key) {
method clear (line 167) | public void clear() {
method setCapacity (line 178) | public void setCapacity(long capacity) {
method close (line 182) | public void close() {
method resetStatistics (line 198) | public void resetStatistics() {
method stats (line 205) | public OffHeapHashTableStats stats() {
method size (line 230) | public long size() {
method segments (line 238) | public int segments() {
method loadFactor (line 242) | public float loadFactor() {
method hashTableSizes (line 246) | public int[] hashTableSizes() {
method perSegmentSizes (line 254) | public long[] perSegmentSizes() {
method perSegmentStats (line 262) | public SegmentStats[] perSegmentStats() {
method getBucketHistogram (line 272) | public EstimatedHistogram getBucketHistogram() {
method toString (line 300) | public String toString() {
FILE: src/main/java/com/oath/halodb/OffHeapHashTableStats.java
class OffHeapHashTableStats (line 12) | final class OffHeapHashTableStats {
method OffHeapHashTableStats (line 24) | public OffHeapHashTableStats(long hitCount, long missCount,
method getRehashCount (line 39) | public long getRehashCount() {
method getHitCount (line 43) | public long getHitCount() {
method getMissCount (line 47) | public long getMissCount() {
method getSize (line 51) | public long getSize() {
method getPutAddCount (line 55) | public long getPutAddCount() {
method getPutReplaceCount (line 59) | public long getPutReplaceCount() {
method getPutFailCount (line 63) | public long getPutFailCount() {
method getRemoveCount (line 67) | public long getRemoveCount() {
method getSegmentStats (line 71) | public SegmentStats[] getSegmentStats() {
method toString (line 75) | public String toString() {
method maxOf (line 86) | private static long maxOf(long[] arr) {
method minOf (line 96) | private static long minOf(long[] arr) {
method avgOf (line 106) | private static double avgOf(long[] arr) {
method equals (line 114) | public boolean equals(Object o) {
method hashCode (line 133) | public int hashCode() {
FILE: src/main/java/com/oath/halodb/Record.java
class Record (line 12) | public class Record {
method Record (line 20) | public Record(byte[] key, byte[] value) {
method serialize (line 26) | ByteBuffer[] serialize() {
method deserialize (line 31) | static Record deserialize(ByteBuffer buffer, short keySize, int valueS...
method getKey (line 40) | public byte[] getKey() {
method getValue (line 44) | public byte[] getValue() {
method getRecordMetaData (line 48) | InMemoryIndexMetaData getRecordMetaData() {
method setRecordMetaData (line 52) | void setRecordMetaData(InMemoryIndexMetaData recordMetaData) {
method getRecordSize (line 59) | int getRecordSize() {
method setSequenceNumber (line 63) | void setSequenceNumber(long sequenceNumber) {
method getSequenceNumber (line 67) | long getSequenceNumber() {
method setVersion (line 71) | void setVersion(int version) {
method getVersion (line 78) | int getVersion() {
method getHeader (line 82) | Header getHeader() {
method setHeader (line 86) | void setHeader(Header header) {
method serializeHeaderAndComputeChecksum (line 90) | private ByteBuffer serializeHeaderAndComputeChecksum() {
method verifyChecksum (line 97) | boolean verifyChecksum() {
method computeCheckSum (line 104) | private long computeCheckSum(byte[] header) {
method equals (line 114) | @Override
class Header (line 129) | static class Header {
method Header (line 154) | Header(long checkSum, int version, byte keySize, int valueSize, long...
method deserialize (line 163) | static Header deserialize(ByteBuffer buffer) {
method serialize (line 175) | ByteBuffer serialize() {
method verifyHeader (line 186) | static boolean verifyHeader(Record.Header header) {
method getKeySize (line 192) | byte getKeySize() {
method getValueSize (line 196) | int getValueSize() {
method getRecordSize (line 200) | int getRecordSize() {
method getSequenceNumber (line 204) | long getSequenceNumber() {
method getCheckSum (line 208) | long getCheckSum() {
method getVersion (line 212) | int getVersion() {
FILE: src/main/java/com/oath/halodb/RecordKey.java
class RecordKey (line 5) | public class RecordKey {
method RecordKey (line 7) | public RecordKey(byte[] key) {
method getBytes (line 11) | public byte[] getBytes() {
method equals (line 15) | @Override
FILE: src/main/java/com/oath/halodb/Segment.java
class Segment (line 14) | abstract class Segment<V> {
method Segment (line 26) | Segment(HashTableValueSerializer<V> valueSerializer, int fixedValueLen...
method Segment (line 30) | Segment(HashTableValueSerializer<V> valueSerializer, int fixedValueLen...
method lock (line 39) | boolean lock() {
method unlock (line 57) | void unlock(boolean wasFirst) {
method keySource (line 67) | KeyBuffer keySource(byte[] key) {
method getEntry (line 72) | abstract V getEntry(KeyBuffer key);
method containsEntry (line 74) | abstract boolean containsEntry(KeyBuffer key);
method putEntry (line 76) | abstract boolean putEntry(byte[] key, V value, long hash, boolean ifAb...
method removeEntry (line 78) | abstract boolean removeEntry(KeyBuffer key);
method size (line 80) | abstract long size();
method release (line 82) | abstract void release();
method clear (line 84) | abstract void clear();
method hitCount (line 86) | abstract long hitCount();
method missCount (line 88) | abstract long missCount();
method putAddCount (line 90) | abstract long putAddCount();
method putReplaceCount (line 92) | abstract long putReplaceCount();
method removeCount (line 94) | abstract long removeCount();
method resetStatistics (line 96) | abstract void resetStatistics();
method rehashes (line 98) | abstract long rehashes();
method loadFactor (line 100) | abstract float loadFactor();
method hashTableSize (line 102) | abstract int hashTableSize();
method updateBucketHistogram (line 104) | abstract void updateBucketHistogram(EstimatedHistogram hist);
method numberOfChunks (line 109) | long numberOfChunks() {
method numberOfSlots (line 113) | long numberOfSlots() {
method freeListSize (line 117) | long freeListSize() {
FILE: src/main/java/com/oath/halodb/SegmentNonMemoryPool.java
class SegmentNonMemoryPool (line 16) | class SegmentNonMemoryPool<V> extends Segment<V> {
method SegmentNonMemoryPool (line 42) | SegmentNonMemoryPool(OffHeapHashTableBuilder<V> builder) {
method release (line 68) | @Override
method size (line 79) | @Override
method hitCount (line 84) | @Override
method missCount (line 89) | @Override
method putAddCount (line 94) | @Override
method putReplaceCount (line 99) | @Override
method removeCount (line 104) | @Override
method resetStatistics (line 109) | @Override
method rehashes (line 120) | @Override
method getEntry (line 125) | @Override
method containsEntry (line 146) | @Override
method putEntry (line 166) | @Override
method putEntry (line 200) | private boolean putEntry(long newHashEntryAdr, long hash, long keyLen,...
method notSameKey (line 265) | private static boolean notSameKey(long newHashEntryAdr, long newHash, ...
method serializeForPut (line 271) | private void serializeForPut(byte[] key, V value, long hashEntryAdr) {
method freeAndThrow (line 282) | private void freeAndThrow(Throwable e, long hashEntryAdr) {
method clear (line 293) | @Override
method removeEntry (line 315) | @Override
method rehash (line 348) | private void rehash() {
method loadFactor (line 384) | float loadFactor() {
method hashTableSize (line 388) | int hashTableSize() {
method updateBucketHistogram (line 392) | void updateBucketHistogram(EstimatedHistogram hist) {
method getEntryAddresses (line 401) | void getEntryAddresses(int mapSegmentIndex, int nSegments, LongArrayLi...
class Table (line 416) | static final class Table {
method create (line 422) | static Table create(int hashTableSize, boolean throwOOME) {
method Table (line 428) | private Table(long address, int hashTableSize) {
method clear (line 434) | void clear() {
method release (line 440) | void release() {
method finalize (line 445) | protected void finalize() throws Throwable {
method getFirst (line 452) | long getFirst(long hash) {
method setFirst (line 456) | void setFirst(long hash, long hashEntryAdr) {
method bucketOffset (line 460) | long bucketOffset(long hash) {
method bucketIndexForHash (line 464) | private int bucketIndexForHash(long hash) {
method removeLink (line 468) | void removeLink(long hash, long hashEntryAdr, long prevEntryAdr) {
method replaceSentinelLink (line 474) | void replaceSentinelLink(long hash, long hashEntryAdr, long prevEntr...
method removeLinkInternal (line 480) | private void removeLinkInternal(long hash, long hashEntryAdr, long p...
method addAsHead (line 498) | void addAsHead(long hash, long hashEntryAdr) {
method size (line 504) | int size() {
method updateBucketHistogram (line 508) | void updateBucketHistogram(EstimatedHistogram h) {
method removeInternal (line 519) | private void removeInternal(long hashEntryAdr, long prevEntryAdr, long...
method add (line 523) | private void add(long hashEntryAdr, long hash) {
method toString (line 527) | @Override
FILE: src/main/java/com/oath/halodb/SegmentStats.java
class SegmentStats (line 10) | class SegmentStats {
method SegmentStats (line 17) | public SegmentStats(long noOfEntries, long numberOfChunks, long number...
method toString (line 24) | @Override
method equals (line 42) | @Override
method hashCode (line 57) | @Override
FILE: src/main/java/com/oath/halodb/SegmentWithMemoryPool.java
class SegmentWithMemoryPool (line 19) | class SegmentWithMemoryPool<V> extends Segment<V> {
method SegmentWithMemoryPool (line 57) | SegmentWithMemoryPool(OffHeapHashTableBuilder<V> builder) {
method getEntry (line 88) | @Override
method containsEntry (line 110) | @Override
method putEntry (line 132) | @Override
method removeEntry (line 191) | @Override
method getNext (line 215) | private MemoryPoolAddress getNext(MemoryPoolAddress address) {
method writeToFreeSlot (line 224) | private MemoryPoolAddress writeToFreeSlot(byte[] key, byte[] value, Me...
method removeInternal (line 252) | private void removeInternal(MemoryPoolAddress address, MemoryPoolAddre...
method rehash (line 268) | private void rehash() {
method size (line 298) | @Override
method release (line 303) | @Override
method clear (line 318) | @Override
method hitCount (line 332) | @Override
method missCount (line 337) | @Override
method putAddCount (line 342) | @Override
method putReplaceCount (line 347) | @Override
method removeCount (line 352) | @Override
method resetStatistics (line 357) | @Override
method numberOfChunks (line 367) | @Override
method numberOfSlots (line 372) | @Override
method freeListSize (line 377) | @Override
method rehashes (line 382) | @Override
method loadFactor (line 387) | @Override
method hashTableSize (line 392) | @Override
method updateBucketHistogram (line 397) | @Override
class Table (line 407) | static final class Table {
method create (line 413) | static Table create(int hashTableSize) {
method Table (line 419) | private Table(long address, int hashTableSize) {
method clear (line 425) | void clear() {
method release (line 429) | void release() {
method finalize (line 434) | protected void finalize() throws Throwable {
method getFirst (line 441) | MemoryPoolAddress getFirst(long hash) {
method addAsHead (line 449) | void addAsHead(long hash, MemoryPoolAddress entryAddress) {
method bucketOffset (line 455) | long bucketOffset(long hash) {
method bucketIndexForHash (line 459) | private int bucketIndexForHash(long hash) {
method size (line 463) | int size() {
method updateBucketHistogram (line 467) | void updateBucketHistogram(EstimatedHistogram h, final List<MemoryPo...
method getFreeListHead (line 479) | @VisibleForTesting
method getChunkWriteOffset (line 484) | @VisibleForTesting
FILE: src/main/java/com/oath/halodb/TombstoneEntry.java
class TombstoneEntry (line 11) | class TombstoneEntry {
method TombstoneEntry (line 33) | TombstoneEntry(byte[] key, long sequenceNumber, long checkSum, int ver...
method getKey (line 40) | byte[] getKey() {
method getSequenceNumber (line 44) | long getSequenceNumber() {
method getVersion (line 48) | int getVersion() {
method getCheckSum (line 52) | long getCheckSum() {
method size (line 56) | int size() {
method serialize (line 60) | ByteBuffer[] serialize() {
method deserialize (line 71) | static TombstoneEntry deserialize(ByteBuffer buffer) {
method deserializeIfNotCorrupted (line 83) | static TombstoneEntry deserializeIfNotCorrupted(ByteBuffer buffer) {
method computeCheckSum (line 106) | private long computeCheckSum(byte[] header) {
method computeCheckSum (line 113) | long computeCheckSum() {
FILE: src/main/java/com/oath/halodb/TombstoneFile.java
class TombstoneFile (line 24) | class TombstoneFile {
method create (line 39) | static TombstoneFile create(DBDirectory dbDirectory, int fileId, HaloD...
method TombstoneFile (line 54) | TombstoneFile(File backingFile, HaloDBOptions options, DBDirectory dbD...
method open (line 60) | void open() throws IOException {
method close (line 64) | void close() throws IOException {
method delete (line 70) | void delete() throws IOException {
method write (line 77) | void write(TombstoneEntry entry) throws IOException {
method getWriteOffset (line 98) | long getWriteOffset() {
method flushToDisk (line 102) | void flushToDisk() throws IOException {
method repairFile (line 114) | TombstoneFile repairFile(DBDirectory dbDirectory) throws IOException {
method createRepairFile (line 139) | private TombstoneFile createRepairFile() throws IOException {
method getName (line 151) | String getName() {
method getPath (line 155) | private Path getPath() {
method getSize (line 159) | private long getSize() {
method newIterator (line 163) | TombstoneFile.TombstoneFileIterator newIterator() throws IOException {
method newIteratorWithCheckForDataCorruption (line 168) | TombstoneFile.TombstoneFileIterator newIteratorWithCheckForDataCorrupt...
method getTombstoneFile (line 172) | private static File getTombstoneFile(DBDirectory dbDirectory, int file...
class TombstoneFileIterator (line 176) | class TombstoneFileIterator implements Iterator<TombstoneEntry> {
method TombstoneFileIterator (line 181) | TombstoneFileIterator(boolean discardCorruptedRecords) throws IOExce...
method hasNext (line 186) | @Override
method next (line 191) | @Override
FILE: src/main/java/com/oath/halodb/Uns.java
class Uns (line 25) | final class Uns {
class AllocInfo (line 41) | private static final class AllocInfo {
method AllocInfo (line 46) | AllocInfo(Long size, Throwable trace) {
method clearUnsDebugForTest (line 52) | static void clearUnsDebugForTest() {
method freed (line 69) | private static void freed(long address) {
method allocated (line 80) | private static void allocated(long address, long bytes) {
method validate (line 91) | private static void validate(long address, long offset, long len) {
method Uns (line 169) | private Uns() {
method getLongFromByteArray (line 172) | static long getLongFromByteArray(byte[] array, int offset) {
method getIntFromByteArray (line 178) | static int getIntFromByteArray(byte[] array, int offset) {
method getShortFromByteArray (line 185) | static short getShortFromByteArray(byte[] array, int offset) {
method getAndPutLong (line 192) | static long getAndPutLong(long address, long offset, long value) {
method putLong (line 198) | static void putLong(long address, long offset, long value) {
method getLong (line 203) | static long getLong(long address, long offset) {
method putInt (line 208) | static void putInt(long address, long offset, int value) {
method getInt (line 213) | static int getInt(long address, long offset) {
method putShort (line 218) | static void putShort(long address, long offset, short value) {
method getShort (line 223) | static short getShort(long address, long offset) {
method putByte (line 228) | static void putByte(long address, long offset, byte value) {
method getByte (line 233) | static byte getByte(long address, long offset) {
method decrement (line 238) | static boolean decrement(long address, long offset) {
method increment (line 244) | static void increment(long address, long offset) {
method copyMemory (line 249) | static void copyMemory(byte[] arr, int off, long address, long offset,...
method copyMemory (line 254) | static void copyMemory(long address, long offset, byte[] arr, int off,...
method copyMemory (line 259) | static void copyMemory(long src, long srcOffset, long dst, long dstOff...
method setMemory (line 265) | static void setMemory(long address, long offset, long len, byte val) {
method memoryCompare (line 270) | static boolean memoryCompare(long adr1, long off1, long adr2, long off...
method crc32 (line 304) | static long crc32(long address, long offset, long len) {
method getTotalAllocated (line 309) | static long getTotalAllocated() {
method allocate (line 313) | static long allocate(long bytes) {
method allocate (line 317) | static long allocate(long bytes, boolean throwOOME) {
method allocateIOException (line 327) | static long allocateIOException(long bytes) throws IOException {
method allocateIOException (line 331) | static long allocateIOException(long bytes, boolean throwOOME) throws ...
method free (line 339) | static void free(long address) {
method directBufferFor (line 369) | static ByteBuffer directBufferFor(long address, long offset, long len,...
method invalidateDirectBuffer (line 387) | static void invalidateDirectBuffer(ByteBuffer buffer) {
method readOnlyBuffer (line 394) | static ByteBuffer readOnlyBuffer(long hashEntryAdr, int length, long o...
method buffer (line 398) | static ByteBuffer buffer(long hashEntryAdr, long length, long offset) {
FILE: src/main/java/com/oath/halodb/UnsExt.java
class UnsExt (line 12) | abstract class UnsExt {
method UnsExt (line 16) | UnsExt(Unsafe unsafe) {
method getAndPutLong (line 20) | abstract long getAndPutLong(long address, long offset, long value);
method getAndAddInt (line 22) | abstract int getAndAddInt(long address, long offset, int value);
method crc32 (line 24) | abstract long crc32(long address, long offset, long len);
FILE: src/main/java/com/oath/halodb/UnsExt8.java
class UnsExt8 (line 14) | final class UnsExt8 extends UnsExt {
method UnsExt8 (line 16) | UnsExt8(Unsafe unsafe) {
method getAndPutLong (line 20) | long getAndPutLong(long address, long offset, long value) {
method getAndAddInt (line 24) | int getAndAddInt(long address, long offset, int value) {
method crc32 (line 28) | long crc32(long address, long offset, long len) {
FILE: src/main/java/com/oath/halodb/UnsafeAllocator.java
class UnsafeAllocator (line 14) | final class UnsafeAllocator implements NativeMemoryAllocator {
method allocate (line 28) | public long allocate(long size) {
method free (line 36) | public void free(long peer) {
method getTotalAllocated (line 40) | public long getTotalAllocated() {
FILE: src/main/java/com/oath/halodb/Utils.java
class Utils (line 8) | class Utils {
method roundUpToPowerOf2 (line 9) | static long roundUpToPowerOf2(long number) {
method getValueOffset (line 13) | static int getValueOffset(int recordOffset, byte[] key) {
method getRecordSize (line 18) | static int getRecordSize(int keySize, int valueSize) {
method getValueSize (line 22) | static int getValueSize(int recordSize, byte[] key) {
method getMetaData (line 26) | static InMemoryIndexMetaData getMetaData(IndexFileEntry entry, int fil...
method toUnsignedIntFromInt (line 30) | static long toUnsignedIntFromInt(int value) {
method toSignedIntFromLong (line 34) | static int toSignedIntFromLong(long value) {
method toUnsignedByte (line 38) | static int toUnsignedByte(byte value) {
FILE: src/main/java/com/oath/halodb/Versions.java
class Versions (line 8) | class Versions {
FILE: src/main/java/com/oath/halodb/histo/EstimatedHistogram.java
class EstimatedHistogram (line 17) | public class EstimatedHistogram {
method EstimatedHistogram (line 34) | public EstimatedHistogram() {
method EstimatedHistogram (line 38) | public EstimatedHistogram(int bucketCount) {
method EstimatedHistogram (line 43) | public EstimatedHistogram(long[] offsets, long[] bucketData) {
method newOffsets (line 49) | private static long[] newOffsets(int size) {
method getBucketOffsets (line 68) | public long[] getBucketOffsets() {
method add (line 75) | public void add(long n) {
method get (line 88) | long get(int bucket) {
method getBuckets (line 96) | public long[] getBuckets(boolean reset) {
method min (line 116) | public long min() {
method max (line 129) | public long max() {
method percentile (line 146) | public long percentile(double percentile) {
method mean (line 172) | public long mean() {
method count (line 192) | public long count() {
method isOverflowed (line 204) | public boolean isOverflowed() {
method log (line 211) | public void log(Logger log) {
method toString (line 241) | public String toString() {
method nameOfRange (line 275) | private static String nameOfRange(long[] bucketOffsets, int index) {
method appendRange (line 281) | private static void appendRange(StringBuilder sb, long[] bucketOffsets...
method equals (line 304) | @Override
method hashCode (line 319) | @Override
FILE: src/test/java/com/oath/halodb/CheckOffHeapHashTable.java
class CheckOffHeapHashTable (line 19) | final class CheckOffHeapHashTable<V> implements OffHeapHashTable<V>
method CheckOffHeapHashTable (line 30) | CheckOffHeapHashTable(OffHeapHashTableBuilder<V> builder)
method put (line 47) | public boolean put(byte[] key, V value)
method addOrReplace (line 56) | public boolean addOrReplace(byte[] key, V old, V value)
method putIfAbsent (line 66) | public boolean putIfAbsent(byte[] key, V v)
method putIfAbsent (line 75) | public boolean putIfAbsent(byte[] key, V value, long expireAt)
method put (line 80) | public boolean put(byte[] key, V value, long expireAt)
method remove (line 85) | public boolean remove(byte[] key)
method clear (line 92) | public void clear()
method get (line 98) | public V get(byte[] key)
method containsKey (line 110) | public boolean containsKey(byte[] key)
method resetStatistics (line 117) | public void resetStatistics()
method size (line 124) | public long size()
method hashTableSizes (line 132) | public int[] hashTableSizes()
method perSegmentStats (line 138) | public SegmentStats[] perSegmentStats() {
method getBucketHistogram (line 148) | public EstimatedHistogram getBucketHistogram()
method segments (line 153) | public int segments()
method loadFactor (line 158) | public float loadFactor()
method stats (line 163) | public OffHeapHashTableStats stats()
method putAddCount (line 178) | private long putAddCount()
method putReplaceCount (line 186) | private long putReplaceCount()
method removeCount (line 194) | private long removeCount()
method hitCount (line 202) | private long hitCount()
method missCount (line 210) | private long missCount()
method close (line 218) | public void close()
method segment (line 227) | private CheckSegment segment(long hash)
method keySource (line 233) | KeyBuffer keySource(byte[] key) {
method value (line 238) | private byte[] value(V value)
FILE: src/test/java/com/oath/halodb/CheckSegment.java
class CheckSegment (line 19) | final class CheckSegment {
method CheckSegment (line 30) | public CheckSegment(int initialCapacity, float loadFactor) {
method clear (line 34) | synchronized void clear()
method get (line 40) | synchronized byte[] get(KeyBuffer keyBuffer)
method put (line 56) | synchronized boolean put(KeyBuffer keyBuffer, byte[] data, boolean ifA...
method remove (line 81) | synchronized boolean remove(KeyBuffer keyBuffer)
method size (line 93) | synchronized long size()
method sizeOf (line 98) | static long sizeOf(KeyBuffer key, byte[] value)
method resetStatistics (line 104) | void resetStatistics()
FILE: src/test/java/com/oath/halodb/CompactionWithErrorsTest.java
class CompactionWithErrorsTest (line 27) | public class CompactionWithErrorsTest extends TestBase {
method testCompactionWithException (line 29) | @Test
method testRestartCompactionThreadAfterCrash (line 77) | @Test
method testCompactionThreadStopWithIOException (line 151) | @Test
method insertAndUpdate (line 183) | private List<Record> insertAndUpdate(HaloDB db, int numberOfRecords) t...
FILE: src/test/java/com/oath/halodb/CrossCheckTest.java
class CrossCheckTest (line 30) | public class CrossCheckTest
method deinit (line 36) | @AfterMethod(alwaysRun = true)
method cache (line 42) | static DoubleCheckOffHeapHashTableImpl<byte[]> cache(HashAlgorithm has...
method cache (line 47) | static DoubleCheckOffHeapHashTableImpl<byte[]> cache(HashAlgorithm has...
method cache (line 52) | static DoubleCheckOffHeapHashTableImpl<byte[]> cache(HashAlgorithm has...
method cache (line 57) | static DoubleCheckOffHeapHashTableImpl<byte[]> cache(HashAlgorithm has...
method cacheEviction (line 77) | @DataProvider(name = "hashAlgorithms")
method testBasics (line 91) | @Test(dataProvider = "hashAlgorithms")
method testManyValues (line 123) | @Test(dataProvider = "hashAlgorithms", dependsOnMethods = "testBasics")
method testRehash (line 182) | @Test(dataProvider = "hashAlgorithms", dependsOnMethods = "testBasics")
method testPutTooLargeValue (line 268) | @Test(dataProvider = "hashAlgorithms", dependsOnMethods = "testBasics",
method testPutTooLargeKey (line 280) | @Test(dataProvider = "hashAlgorithms", dependsOnMethods = "testBasics",
method testAddOrReplace (line 294) | @Test(dataProvider = "hashAlgorithms", dependsOnMethods = "testBasics")
method testPutIfAbsent (line 327) | @Test(dataProvider = "hashAlgorithms")
method testRemove (line 343) | @Test(dataProvider = "hashAlgorithms")
method testClear (line 364) | @Test(dataProvider = "hashAlgorithms")
method testGet_Put (line 385) | @Test(dataProvider = "hashAlgorithms")
method testContainsKey (line 409) | @Test(dataProvider = "hashAlgorithms")
method testGetBucketHistogram (line 423) | @Test(dataProvider = "hashAlgorithms")
method sum (line 452) | private static int sum(int[] ints)
method testResetStatistics (line 460) | @Test(dataProvider = "hashAlgorithms")
FILE: src/test/java/com/oath/halodb/DBDirectoryTest.java
class DBDirectoryTest (line 25) | public class DBDirectoryTest {
method testListIndexFiles (line 33) | @Test
method testListDataFiles (line 41) | @Test
method testListTombstoneFiles (line 50) | @Test
method testSyncMetaDataNoError (line 60) | @Test
method createDirectory (line 65) | @BeforeMethod
method deleteDirectory (line 102) | @AfterMethod
FILE: src/test/java/com/oath/halodb/DBMetaDataTest.java
class DBMetaDataTest (line 19) | public class DBMetaDataTest {
method testDBMetaData (line 24) | @Test
method testCheckSum (line 79) | @Test
method createDirectory (line 98) | @BeforeMethod
method deleteDirectory (line 103) | @AfterMethod
FILE: src/test/java/com/oath/halodb/DBRepairTest.java
class DBRepairTest (line 19) | public class DBRepairTest extends TestBase {
method testRepairDB (line 21) | @Test(dataProvider = "Options")
method testRepairDBWithCompaction (line 82) | @Test(dataProvider = "Options")
method testRepairWithMultipleTombstoneFiles (line 130) | @Test
FILE: src/test/java/com/oath/halodb/DataConsistencyDB.java
class DataConsistencyDB (line 23) | class DataConsistencyDB {
method DataConsistencyDB (line 34) | DataConsistencyDB(HaloDB haloDB, int noOfRecords) {
method put (line 43) | void put(int keyIndex, ByteBuffer keyBuf, byte[] value) throws HaloDBE...
method compareValues (line 56) | int compareValues(int keyIndex, ByteBuffer keyBuf) throws HaloDBExcept...
method checkSize (line 67) | boolean checkSize() {
method delete (line 71) | void delete(int keyIndex, ByteBuffer keyBuf) throws HaloDBException {
method iterateAndCheck (line 83) | boolean iterateAndCheck(HaloDB db) {
method checkValues (line 104) | private int checkValues(long key, ByteBuffer keyBuf, HaloDB haloDB) th...
method containsKey (line 127) | boolean containsKey(byte[] key) throws HaloDBException {
FILE: src/test/java/com/oath/halodb/DataConsistencyTest.java
class DataConsistencyTest (line 21) | public class DataConsistencyTest extends TestBase {
method init (line 42) | @BeforeMethod
method testConcurrentReadAndUpdates (line 51) | @Test(dataProvider = "Options")
class Writer (line 109) | class Writer extends Thread {
method Writer (line 117) | Writer(DataConsistencyDB db) {
method run (line 122) | @Override
class Reader (line 185) | class Reader extends Thread {
method Reader (line 191) | Reader(DataConsistencyDB db) {
method run (line 196) | @Override
method getRandomKeyLength (line 216) | private int getRandomKeyLength() {
method generateRandomValueWithVersion (line 220) | private byte[] generateRandomValueWithVersion(long version, int size) {
method getVersionFromValue (line 226) | static long getVersionFromValue(byte[] value) {
FILE: src/test/java/com/oath/halodb/DoubleCheckOffHeapHashTableImpl.java
class DoubleCheckOffHeapHashTableImpl (line 22) | public class DoubleCheckOffHeapHashTableImpl<V> implements OffHeapHashTa...
method DoubleCheckOffHeapHashTableImpl (line 27) | public DoubleCheckOffHeapHashTableImpl(OffHeapHashTableBuilder<V> buil...
method put (line 33) | public boolean put(byte[] key, V value)
method addOrReplace (line 41) | public boolean addOrReplace(byte[] key, V old, V value)
method putIfAbsent (line 49) | public boolean putIfAbsent(byte[] k, V v)
method putIfAbsent (line 57) | public boolean putIfAbsent(byte[] key, V value, long expireAt)
method put (line 62) | public boolean put(byte[] key, V value, long expireAt)
method remove (line 67) | public boolean remove(byte[] key)
method clear (line 75) | public void clear()
method get (line 81) | public V get(byte[] key)
method containsKey (line 89) | public boolean containsKey(byte[] key)
method resetStatistics (line 97) | public void resetStatistics()
method size (line 103) | public long size()
method hashTableSizes (line 111) | public int[] hashTableSizes()
method perSegmentStats (line 116) | @Override
method getBucketHistogram (line 124) | public EstimatedHistogram getBucketHistogram()
method segments (line 129) | public int segments()
method loadFactor (line 137) | public float loadFactor()
method stats (line 145) | public OffHeapHashTableStats stats()
method close (line 153) | public void close() throws IOException
FILE: src/test/java/com/oath/halodb/FileUtilsTest.java
class FileUtilsTest (line 26) | public class FileUtilsTest {
method createDirectory (line 56) | @BeforeMethod
method deleteDirectory (line 86) | @AfterMethod
method testListIndexFiles (line 91) | @Test
method testListDataFiles (line 99) | @Test
method testListTombstoneFiles (line 107) | @Test
method testDirectoryCreateAndDelete (line 116) | @Test
FILE: src/test/java/com/oath/halodb/HaloDBCompactionTest.java
class HaloDBCompactionTest (line 19) | public class HaloDBCompactionTest extends TestBase {
method testCompaction (line 26) | @Test(dataProvider = "Options")
method testReOpenDBAfterCompaction (line 46) | @Test(dataProvider = "Options")
method testReOpenDBWithoutMerge (line 69) | @Test(dataProvider = "Options")
method testSyncWrites (line 90) | @Test(dataProvider = "Options")
method testUpdatesToSameFile (line 114) | @Test(dataProvider = "Options")
method testFilesWithStaleDataAddedToCompactionQueueDuringDBOpen (line 135) | @Test(dataProvider = "Options")
method testPauseAndResumeCompaction (line 201) | @Test
method insertAndUpdateRecords (line 264) | private Record[] insertAndUpdateRecords(int numberOfRecords, HaloDB db...
method insertAndUpdateRecordsToSameFile (line 290) | private Record[] insertAndUpdateRecordsToSameFile(int numberOfRecords,...
FILE: src/test/java/com/oath/halodb/HaloDBDeletionTest.java
class HaloDBDeletionTest (line 18) | public class HaloDBDeletionTest extends TestBase {
method testSimpleDelete (line 20) | @Test(dataProvider = "Options")
method testDeleteWithIterator (line 49) | @Test(dataProvider = "Options")
method testDeleteAndInsert (line 76) | @Test(dataProvider = "Options")
method testDeleteAndOpen (line 130) | @Test(dataProvider = "Options")
method testDeleteAndMerge (line 163) | @Test(dataProvider = "Options")
method testDeleteAllRecords (line 215) | @Test(dataProvider = "Options")
FILE: src/test/java/com/oath/halodb/HaloDBFileCompactionTest.java
class HaloDBFileCompactionTest (line 20) | public class HaloDBFileCompactionTest extends TestBase {
method testCompaction (line 22) | @Test(dataProvider = "Options")
FILE: src/test/java/com/oath/halodb/HaloDBFileTest.java
class HaloDBFileTest (line 22) | public class HaloDBFileTest {
method before (line 32) | @BeforeMethod
method after (line 46) | @AfterMethod
method testIndexFile (line 56) | @Test
method testFileWithInvalidRecord (line 64) | @Test
method testCorruptedHeader (line 87) | @Test
method testRebuildIndexFile (line 117) | @Test
method testRepairDataFileWithCorruptedValue (line 130) | @Test
method testRepairDataFileWithInCompleteRecord (line 153) | @Test
method testRepairDataFileContainingRecordsWithCorruptedHeader (line 176) | @Test
method testRepairDataFileContainingRecordsWithValidButCorruptedHeader (line 193) | @Test
method verifyIndexFile (line 215) | private void verifyIndexFile(IndexFile file, List<Record> recordList) ...
method insertTestRecords (line 231) | private List<Record> insertTestRecords() throws IOException {
method verifyDataFile (line 241) | private void verifyDataFile(List<Record> recordList, HaloDBFile dataFi...
FILE: src/test/java/com/oath/halodb/HaloDBIteratorTest.java
class HaloDBIteratorTest (line 23) | public class HaloDBIteratorTest extends TestBase {
method testWithEmptyDB (line 25) | @Test(expectedExceptions = NoSuchElementException.class, dataProvider ...
method testWithDelete (line 35) | @Test(dataProvider = "Options")
method testPutAndGetDB (line 60) | @Test(dataProvider = "Options")
method testPutUpdateAndGetDB (line 78) | @Test(dataProvider = "Options")
method testPutUpdateCompactAndGetDB (line 97) | @Test(dataProvider = "Options")
method testConcurrentCompactionAndIterator (line 122) | @Test(dataProvider = "Options")
method testConcurrentCompactionAndIteratorWhenFileIsClosed (line 152) | @Test(dataProvider = "Options")
method testConcurrentCompactionAndIteratorWithMockedException (line 197) | @Test(dataProvider = "Options")
FILE: src/test/java/com/oath/halodb/HaloDBKeyIteratorTest.java
class HaloDBKeyIteratorTest (line 14) | public class HaloDBKeyIteratorTest extends TestBase {
method testWithEmptyDB (line 16) | @Test(expectedExceptions = NoSuchElementException.class, dataProvider ...
method testWithDelete (line 26) | @Test(dataProvider = "Options")
method testPutAndGetDB (line 51) | @Test(dataProvider = "Options")
method testPutUpdateAndGetDB (line 74) | @Test(dataProvider = "Options")
FILE: src/test/java/com/oath/halodb/HaloDBOptionsTest.java
class HaloDBOptionsTest (line 8) | public class HaloDBOptionsTest extends TestBase {
method testDefaultOptions (line 10) | @Test
method testSetBuildIndexThreads (line 20) | @Test
FILE: src/test/java/com/oath/halodb/HaloDBStatsTest.java
class HaloDBStatsTest (line 15) | public class HaloDBStatsTest extends TestBase {
method testOptions (line 17) | @Test(dataProvider = "Options")
method testStaleMap (line 43) | @Test(dataProvider = "Options")
method testCompactionStats (line 74) | @Test(dataProvider = "Options")
method testIndexStats (line 174) | @Test(dataProvider = "Options")
method testStatsToStringMap (line 204) | @Test(dataProvider = "Options")
FILE: src/test/java/com/oath/halodb/HaloDBTest.java
class HaloDBTest (line 22) | public class HaloDBTest extends TestBase {
method testPutAndGetDB (line 24) | @Test(dataProvider = "Options")
method testPutUpdateAndGetDB (line 50) | @Test(dataProvider = "Options")
method testCreateCloseAndOpenDB (line 79) | @Test(dataProvider = "Options")
method testSyncWrite (line 127) | @Test(dataProvider = "Options")
method testToCheckThatLatestUpdateIsPickedAfterDBOpen (line 156) | @Test(dataProvider = "Options")
method testToCheckDelete (line 192) | @Test(dataProvider = "Options")
method testDeleteCloseAndOpen (line 225) | @Test(dataProvider = "Options")
method testDeleteAndInsert (line 262) | @Test(dataProvider = "Options")
method testDeleteInsertCloseAndOpen (line 307) | @Test(dataProvider = "Options")
method testDBMetaFile (line 356) | @Test
method testMaxFileSize (line 395) | @Test(expectedExceptions = IllegalArgumentException.class, expectedExc...
method testLock (line 416) | @Test(expectedExceptions = HaloDBException.class, expectedExceptionsMe...
method testLockReleaseOnError (line 426) | @Test
method testPutAfterClose (line 460) | @Test(expectedExceptions = HaloDBException.class)
method testDeleteAfterClose (line 469) | @Test(expectedExceptions = HaloDBException.class)
method testPutAfterCloseWithoutWrites (line 481) | @Test(expectedExceptions = NullPointerException.class)
method testDeleteAfterCloseWithoutWrites (line 489) | @Test(expectedExceptions = NullPointerException.class)
method testSnapshot (line 500) | @Test
method testCreateAndDeleteSnapshot (line 524) | @Test
method testSnapshotAfterBeenCompacted (line 546) | @Test
FILE: src/test/java/com/oath/halodb/HashTableTestUtils.java
class HashTableTestUtils (line 20) | final class HashTableTestUtils
method serialize (line 25) | public void serialize(String s, ByteBuffer buf)
method deserialize (line 33) | public String deserialize(ByteBuffer buf)
method serializedSize (line 41) | public int serializedSize(String s)
method serialize (line 49) | @Override
method deserialize (line 54) | @Override
method serializedSize (line 62) | @Override
method serialize (line 70) | public void serialize(String s, ByteBuffer buf)
method deserialize (line 75) | public String deserialize(ByteBuffer buf)
method serializedSize (line 83) | public int serializedSize(String s)
method serialize (line 90) | public void serialize(String s, ByteBuffer buf)
method deserialize (line 98) | public String deserialize(ByteBuffer buf)
method serializedSize (line 103) | public int serializedSize(String s)
method serialize (line 111) | public void serialize(byte[] s, ByteBuffer buf)
method deserialize (line 116) | public byte[] deserialize(ByteBuffer buf)
method serializedSize (line 123) | public int serializedSize(byte[] s)
method writeUTFLen (line 129) | static int writeUTFLen(String str)
method serialize (line 155) | public void serialize(Integer s, ByteBuffer buf)
method deserialize (line 167) | public Integer deserialize(ByteBuffer buf)
method serializedSize (line 182) | public int serializedSize(Integer s)
method serialize (line 189) | public void serialize(Integer s, ByteBuffer buf)
method deserialize (line 194) | public Integer deserialize(ByteBuffer buf)
method serializedSize (line 209) | public int serializedSize(Integer s)
method serialize (line 216) | public void serialize(Integer s, ByteBuffer buf)
method deserialize (line 221) | public Integer deserialize(ByteBuffer buf)
method serializedSize (line 226) | public int serializedSize(Integer s)
method fillMany (line 257) | static List<KeyValuePair> fillMany(OffHeapHashTable<byte[]> cache, int...
method fill (line 262) | static List<KeyValuePair> fill(OffHeapHashTable<byte[]> cache, int fix...
method randomBytes (line 275) | static byte[] randomBytes(int len)
class KeyValuePair (line 283) | static class KeyValuePair {
method KeyValuePair (line 286) | KeyValuePair(byte[] key, byte[] value) {
FILE: src/test/java/com/oath/halodb/HashTableUtilTest.java
class HashTableUtilTest (line 15) | public class HashTableUtilTest
method testBitNum (line 19) | @Test
FILE: src/test/java/com/oath/halodb/HashTableValueSerializerTest.java
class HashTableValueSerializerTest (line 19) | public class HashTableValueSerializerTest
method deinit (line 21) | @AfterMethod(alwaysRun = true)
method testFailingValueSerializerOnPut (line 27) | @Test
FILE: src/test/java/com/oath/halodb/HasherTest.java
class HasherTest (line 15) | public class HasherTest
method testMurmur3 (line 17) | @Test
method testCRC32 (line 23) | @Test
method testXX (line 29) | @Test
method test (line 35) | private void test(HashAlgorithm hash)
FILE: src/test/java/com/oath/halodb/IndexFileEntryTest.java
class IndexFileEntryTest (line 16) | public class IndexFileEntryTest {
method serializeIndexFileEntry (line 18) | @Test
method deserializeIndexFileEntry (line 47) | @Test
FILE: src/test/java/com/oath/halodb/KeyBufferTest.java
class KeyBufferTest (line 24) | public class KeyBufferTest
method deinit (line 26) | @AfterMethod(alwaysRun = true)
method hashes (line 32) | @DataProvider
method testHashFinish (line 42) | @Test(dataProvider = "hashes")
method hash (line 64) | private long hash(Hasher hasher)
method hasher (line 76) | private Hasher hasher(HashAlgorithm hashAlgorithm)
method testHashFinish16 (line 89) | @Test(dataProvider = "hashes", dependsOnMethods = "testHashFinish")
method testHashRandom (line 110) | @Test(dataProvider = "hashes", dependsOnMethods = "testHashFinish16")
method testSameKey (line 133) | @Test
method compareKey (line 154) | private void compareKey(byte[] randomKey) {
FILE: src/test/java/com/oath/halodb/LinkedImplTest.java
class LinkedImplTest (line 15) | public class LinkedImplTest
method deinit (line 17) | @AfterMethod(alwaysRun = true)
method cache (line 23) | static OffHeapHashTable<String> cache()
method cache (line 28) | static OffHeapHashTable<String> cache(long capacity)
method cache (line 33) | static OffHeapHashTable<String> cache(long capacity, int hashTableSize)
method cache (line 38) | static OffHeapHashTable<String> cache(long capacity, int hashTableSize...
method testExtremeHashTableSize (line 53) | @Test(expectedExceptions = IllegalArgumentException.class)
FILE: src/test/java/com/oath/halodb/LongArrayListTest.java
class LongArrayListTest (line 16) | public class LongArrayListTest
method testLongArrayList (line 18) | @Test
FILE: src/test/java/com/oath/halodb/MemoryPoolChunkTest.java
class MemoryPoolChunkTest (line 16) | public class MemoryPoolChunkTest {
method destroyChunk (line 20) | @AfterMethod(alwaysRun = true)
method testSetAndGetMethods (line 27) | @Test
method testWithInvalidOffset (line 85) | @Test(expectedExceptions = IllegalArgumentException.class, expectedExc...
method testWithInvalidKey (line 94) | @Test(expectedExceptions = IllegalArgumentException.class, expectedExc...
method testCompare (line 103) | @Test
method testCompareKeyWithException (line 128) | @Test(expectedExceptions = IllegalArgumentException.class, expectedExc...
method testCompareValueWithException (line 146) | @Test(expectedExceptions = IllegalArgumentException.class, expectedExc...
method setAndGetNextAddress (line 162) | @Test
FILE: src/test/java/com/oath/halodb/NonMemoryPoolHashEntriesTest.java
class NonMemoryPoolHashEntriesTest (line 17) | public class NonMemoryPoolHashEntriesTest
method deinit (line 19) | @AfterMethod(alwaysRun = true)
method testInit (line 27) | @Test
method testCompareKey (line 47) | @Test
method testGetSetNext (line 80) | @Test
FILE: src/test/java/com/oath/halodb/OffHeapHashTableBuilderTest.java
class OffHeapHashTableBuilderTest (line 17) | public class OffHeapHashTableBuilderTest
method testHashTableSize (line 20) | @Test
method testChunkSize (line 29) | @Test
method testSegmentCount (line 37) | @Test
method testLoadFactor (line 51) | @Test
method testValueSerializer (line 60) | @Test
method testFixedValueSize (line 87) | @Test(expectedExceptions = IllegalArgumentException.class, expectedExc...
FILE: src/test/java/com/oath/halodb/RandomDataGenerator.java
class RandomDataGenerator (line 10) | class RandomDataGenerator {
method RandomDataGenerator (line 16) | RandomDataGenerator() {
method getData (line 22) | byte[] getData(int length) {
FILE: src/test/java/com/oath/halodb/RecordTest.java
class RecordTest (line 14) | public class RecordTest {
method testSerializeHeader (line 16) | @Test
method testDeserializeHeader (line 33) | @Test
method testSerializeRecord (line 60) | @Test
FILE: src/test/java/com/oath/halodb/RehashTest.java
class RehashTest (line 20) | public class RehashTest
method deinit (line 22) | @AfterMethod(alwaysRun = true)
method testRehash (line 28) | @Test
FILE: src/test/java/com/oath/halodb/SegmentWithMemoryPoolTest.java
class SegmentWithMemoryPoolTest (line 19) | public class SegmentWithMemoryPoolTest {
method testChunkAllocations (line 21) | @Test
method testFreeList (line 55) | @Test
method testOutOfMemoryException (line 127) | @Test(expectedExceptions = OutOfMemoryError.class, expectedExceptionsM...
method testReplace (line 149) | @Test
method testRehash (line 202) | @Test
method addEntriesToSegment (line 233) | private List<Record> addEntriesToSegment(SegmentWithMemoryPool<byte[]>...
class Record (line 247) | private static class Record {
method Record (line 251) | public Record(KeyBuffer keyBuffer, byte[] value) {
FILE: src/test/java/com/oath/halodb/SequenceNumberTest.java
class SequenceNumberTest (line 21) | public class SequenceNumberTest extends TestBase {
method testSequenceNumber (line 23) | @Test(dataProvider = "Options")
FILE: src/test/java/com/oath/halodb/SyncWriteTest.java
class SyncWriteTest (line 14) | public class SyncWriteTest extends TestBase {
method testSyncWrites (line 16) | @Test
method testNonSyncWrites (line 52) | @Test
method testNonSyncDeletes (line 77) | @Test
FILE: src/test/java/com/oath/halodb/TestBase.java
class TestBase (line 14) | public class TestBase {
method optionData (line 21) | @DataProvider(name = "Options")
method getTestDB (line 36) | HaloDB getTestDB(String directory, HaloDBOptions options) throws HaloD...
method getTestDBWithoutDeletingFiles (line 53) | HaloDB getTestDBWithoutDeletingFiles(String directory, HaloDBOptions o...
method closeDB (line 61) | @AfterMethod(alwaysRun = true)
FILE: src/test/java/com/oath/halodb/TestListener.java
class TestListener (line 14) | public class TestListener implements ITestListener {
method onTestStart (line 17) | @Override
method onTestSuccess (line 23) | @Override
method onTestFailure (line 29) | @Override
method onTestSkipped (line 34) | @Override
method onTestFailedButWithinSuccessPercentage (line 39) | @Override
method onStart (line 44) | @Override
method onFinish (line 49) | @Override
FILE: src/test/java/com/oath/halodb/TestUtils.java
class TestUtils (line 31) | public class TestUtils {
method getTestDirectory (line 34) | static String getTestDirectory(String... path) {
method getTestDirectoryPath (line 38) | static Path getTestDirectoryPath(String... path) {
method insertRandomRecords (line 42) | static List<Record> insertRandomRecords(HaloDB db, int noOfRecords) th...
method insertRandomRecordsOfSize (line 46) | static List<Record> insertRandomRecordsOfSize(HaloDB db, int noOfRecor...
method generateRandomData (line 80) | static List<Record> generateRandomData(int noOfRecords) {
method updateRecords (line 100) | static List<Record> updateRecords(HaloDB db, List<Record> records) {
method updateRecordsWithSize (line 116) | static List<Record> updateRecordsWithSize(HaloDB db, List<Record> reco...
method deleteRecords (line 132) | static void deleteRecords(HaloDB db, List<Record> records) {
method deleteDirectory (line 142) | static void deleteDirectory(File directory) throws IOException {
method concatenateArrays (line 165) | static byte[] concatenateArrays(byte[] a, byte[] b) {
method generateRandomAsciiString (line 175) | static String generateRandomAsciiString(int length) {
method generateRandomAsciiString (line 186) | static String generateRandomAsciiString() {
method generateRandomByteArray (line 198) | public static byte[] generateRandomByteArray(int length) {
method generateRandomByteArray (line 205) | public static byte[] generateRandomByteArray() {
method waitForCompactionToComplete (line 216) | static void waitForCompactionToComplete(HaloDB db) {
method waitForTombstoneFileMergeComplete (line 227) | static void waitForTombstoneFileMergeComplete(HaloDB db) {
method getLatestDataFile (line 238) | static Optional<File> getLatestDataFile(String directory) {
method getDataFiles (line 244) | static List<File> getDataFiles(String directory) {
method getLatestCompactionFile (line 250) | static Optional<File> getLatestCompactionFile(String directory) {
method getFileCreationTime (line 256) | static FileTime getFileCreationTime(File file) throws IOException {
FILE: src/test/java/com/oath/halodb/TombstoneFileCleanUpTest.java
class TombstoneFileCleanUpTest (line 11) | public class TombstoneFileCleanUpTest extends TestBase {
method testDeleteAllRecords (line 13) | @Test(dataProvider = "Options")
method testDeleteAndInsertRecords (line 63) | @Test(dataProvider = "Options")
method testDeleteRecordsWithoutCompaction (line 98) | @Test(dataProvider = "Options")
method testWithCleanUpTurnedOff (line 156) | @Test(dataProvider = "Options")
method testCopyMultipleTombstoneFiles (line 197) | @Test(dataProvider = "Options")
method testMergeTombstoneFiles (line 254) | @Test(dataProvider = "Options")
method getFileId (line 332) | private int getFileId(String fileName) {
FILE: src/test/java/com/oath/halodb/TombstoneFileTest.java
class TombstoneFileTest (line 23) | public class TombstoneFileTest {
method before (line 32) | @BeforeMethod
method after (line 46) | @AfterMethod
method testRepairFileWithCorruptedEntry (line 54) | @Test
method testRepairFileWithCorruptedKeySize (line 76) | @Test
method testRepairFileWithIncompleteEntry (line 98) | @Test
method verifyData (line 121) | private void verifyData(TombstoneFile file, List<TombstoneEntry> recor...
method insertTestRecords (line 137) | private List<TombstoneEntry> insertTestRecords(int number) throws IOEx...
FILE: src/test/java/com/oath/halodb/UnsTest.java
class UnsTest (line 23) | public class UnsTest
method deinit (line 25) | @AfterMethod(alwaysRun = true)
method fillRandom (line 54) | private static void fillRandom()
method testDirectBufferFor (line 63) | @Test
method testAllocate (line 146) | @Test
method testAllocateTooMuch (line 157) | @Test(expectedExceptions = IOException.class)
method testGetTotalAllocated (line 163) | @Test
method testCopyMemory (line 195) | @Test
method equals (line 222) | private static void equals(byte[] ref, long adr, int off, int len)
method equals (line 228) | private static void equals(byte[] ref, byte[] arr, int off, int len)
method testSetMemory (line 234) | @Test
method testGetLongFromByteArray (line 255) | @Test
method testGetPutLong (line 279) | @Test
method testGetPutInt (line 302) | @Test
method testGetPutShort (line 325) | @Test
method testGetPutByte (line 348) | @Test
method testDecrementIncrement (line 374) | @Test
method testCompare (line 407) | @Test
method testCompareManyKeys (line 440) | @Test
FILE: src/test/java/com/oath/halodb/histo/EstimatedHistogramTest.java
class EstimatedHistogramTest (line 7) | public class EstimatedHistogramTest {
method testGetBuckets (line 9) | @Test
method testMin (line 19) | @Test
method testMax (line 30) | @Test
method testPercentile (line 47) | @Test
method testMean (line 60) | @Test
method testIsOverflowed (line 70) | @Test
method testToString (line 84) | @Test
method testEquals (line 97) | @Test
Condensed preview — 117 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (616K chars).
[
{
"path": ".github/workflows/maven-publish.yml",
"chars": 925,
"preview": "# This workflow will build a package using Maven and then publish it to GitHub packages when a release is created\n# For "
},
{
"path": ".github/workflows/maven.yml",
"chars": 570,
"preview": "# This workflow will build a Java project with Maven\n# For more information see: https://help.github.com/actions/languag"
},
{
"path": ".gitignore",
"chars": 29,
"preview": "target\n.idea\nhalodb.iml\ntmp/\n"
},
{
"path": ".travis.yml",
"chars": 49,
"preview": "language: java\ndist: trusty\n\njdk:\n - oraclejdk8\n"
},
{
"path": "CHANGELOG.md",
"chars": 629,
"preview": "# HaloDB Change Log\n\n## 0.4.3 (08/20/2018)\n* Sequence number, instead of relying on system time, is now a number increme"
},
{
"path": "CONTRIBUTING.md",
"chars": 1432,
"preview": "# How to contribute\nFirst, thanks for taking the time to contribute to our project! The following information provides a"
},
{
"path": "CONTRIBUTORS.md",
"chars": 85,
"preview": " HaloDB was designed and implemented by [Arjun Mannaly](https://github.com/amannaly) "
},
{
"path": "Code-of-Conduct.md",
"chars": 7497,
"preview": "# Oath Open Source Code of Conduct\n\n## Summary\nThis Code of Conduct is our way to encourage good behavior and discourage"
},
{
"path": "LICENSE",
"chars": 9137,
"preview": "Apache License\n\nVersion 2.0, January 2004\n\nhttp://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, "
},
{
"path": "NOTICE",
"chars": 1047,
"preview": "=========================================================================\nNOTICE file for use with, and corresponding to"
},
{
"path": "README.md",
"chars": 13280,
"preview": "# HaloDB\n\n[](https://travis-ci.org/yahoo/HaloDB)\n[!"
},
{
"path": "benchmarks/README.md",
"chars": 438,
"preview": "# Storage Engine Benchmark Tool. \n\nBuild the package using **mvn clean package** This will create a far jar *target/stor"
},
{
"path": "benchmarks/pom.xml",
"chars": 3397,
"preview": "<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:sc"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/BenchmarkTool.java",
"chars": 10680,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/Benchmarks.java",
"chars": 303,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/HaloDBStorageEngine.java",
"chars": 2284,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/KyotoStorageEngine.java",
"chars": 1919,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/RandomDataGenerator.java",
"chars": 785,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/RocksDBStorageEngine.java",
"chars": 4075,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "benchmarks/src/main/java/com/oath/halodb/benchmarks/StorageEngine.java",
"chars": 482,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "docs/WhyHaloDB.md",
"chars": 6471,
"preview": " \n# HaloDB at Yahoo.\n\nAt Yahoo, we built this high throughput, low latency distributed key-value database that runs in m"
},
{
"path": "docs/benchmarks.md",
"chars": 4503,
"preview": "# Benchmarks \n \n Benchmarks were run to compare HaloDB against RocksDB and KyotoCabinet.\n KyotoCabinet was chosen as"
},
{
"path": "pom.xml",
"chars": 4778,
"preview": "<!--\n ~ Copyright 2018, Oath Inc\n ~ Licensed under the terms of the Apache License 2.0. Please refer to accompanying L"
},
{
"path": "src/main/java/com/oath/halodb/CompactionManager.java",
"chars": 13301,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/Constants.java",
"chars": 726,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/DBDirectory.java",
"chars": 2824,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/DBMetaData.java",
"chars": 4663,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/FileUtils.java",
"chars": 3097,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDB.java",
"chars": 3200,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDBException.java",
"chars": 530,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDBFile.java",
"chars": 12898,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDBInternal.java",
"chars": 38549,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDBIterator.java",
"chars": 4142,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDBKeyIterator.java",
"chars": 3694,
"preview": "package com.oath.halodb;\n\nimport java.io.IOException;\nimport java.nio.channels.ClosedChannelException;\nimport java.util."
},
{
"path": "src/main/java/com/oath/halodb/HaloDBOptions.java",
"chars": 6325,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HaloDBStats.java",
"chars": 10148,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HashAlgorithm.java",
"chars": 343,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HashTableUtil.java",
"chars": 936,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/HashTableValueSerializer.java",
"chars": 549,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/Hasher.java",
"chars": 9924,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/InMemoryIndex.java",
"chars": 2958,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/InMemoryIndexMetaData.java",
"chars": 1571,
"preview": "\n/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICE"
},
{
"path": "src/main/java/com/oath/halodb/InMemoryIndexMetaDataSerializer.java",
"chars": 739,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/IndexFile.java",
"chars": 4139,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/IndexFileEntry.java",
"chars": 4588,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/JNANativeAllocator.java",
"chars": 689,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/KeyBuffer.java",
"chars": 2884,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/LongArrayList.java",
"chars": 982,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/MemoryPoolAddress.java",
"chars": 1011,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/MemoryPoolChunk.java",
"chars": 6187,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/MemoryPoolHashEntries.java",
"chars": 631,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/NativeMemoryAllocator.java",
"chars": 409,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/NonMemoryPoolHashEntries.java",
"chars": 1352,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/OffHeapHashTable.java",
"chars": 3001,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/OffHeapHashTableBuilder.java",
"chars": 5048,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/OffHeapHashTableImpl.java",
"chars": 8848,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/OffHeapHashTableStats.java",
"chars": 4441,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/Record.java",
"chars": 6353,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/RecordKey.java",
"chars": 611,
"preview": "package com.oath.halodb;\n\nimport java.util.*;\n\npublic class RecordKey {\n final byte[] key;\n public RecordKey(byte["
},
{
"path": "src/main/java/com/oath/halodb/Segment.java",
"chars": 2981,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/SegmentNonMemoryPool.java",
"chars": 16399,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/SegmentStats.java",
"chars": 2041,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/SegmentWithMemoryPool.java",
"chars": 15367,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/TombstoneEntry.java",
"chars": 3744,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/TombstoneFile.java",
"chars": 6696,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/Uns.java",
"chars": 14909,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/UnsExt.java",
"chars": 635,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/UnsExt8.java",
"chars": 949,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/UnsafeAllocator.java",
"chars": 1036,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/Utils.java",
"chars": 1315,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/Versions.java",
"chars": 404,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/main/java/com/oath/halodb/histo/EstimatedHistogram.java",
"chars": 10131,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/CheckOffHeapHashTable.java",
"chars": 6491,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/CheckSegment.java",
"chars": 2657,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/CompactionWithErrorsTest.java",
"chars": 7259,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/CrossCheckTest.java",
"chars": 20991,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/DBDirectoryTest.java",
"chars": 3896,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/DBMetaDataTest.java",
"chars": 3695,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/DBRepairTest.java",
"chars": 7326,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/DataConsistencyDB.java",
"chars": 4298,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/DataConsistencyTest.java",
"chars": 8034,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/DoubleCheckOffHeapHashTableImpl.java",
"chars": 4210,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/FileUtilsTest.java",
"chars": 4413,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBCompactionTest.java",
"chars": 11674,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBDeletionTest.java",
"chars": 8140,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBFileCompactionTest.java",
"chars": 2758,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBFileTest.java",
"chars": 10582,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBIteratorTest.java",
"chars": 8938,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBKeyIteratorTest.java",
"chars": 3443,
"preview": "package com.oath.halodb;\n\nimport java.io.IOException;\nimport java.nio.channels.ClosedChannelException;\nimport java.util."
},
{
"path": "src/test/java/com/oath/halodb/HaloDBOptionsTest.java",
"chars": 1471,
"preview": "package com.oath.halodb;\n\nimport org.testng.Assert;\nimport org.testng.annotations.Test;\n\nimport static org.assertj.core."
},
{
"path": "src/test/java/com/oath/halodb/HaloDBStatsTest.java",
"chars": 10809,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HaloDBTest.java",
"chars": 21181,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HashTableTestUtils.java",
"chars": 8713,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HashTableUtilTest.java",
"chars": 1214,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HashTableValueSerializerTest.java",
"chars": 1928,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/HasherTest.java",
"chars": 1244,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/IndexFileEntryTest.java",
"chars": 2678,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/KeyBufferTest.java",
"chars": 5222,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/LinkedImplTest.java",
"chars": 1877,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/LongArrayListTest.java",
"chars": 853,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/MemoryPoolChunkTest.java",
"chars": 7597,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/NonMemoryPoolHashEntriesTest.java",
"chars": 2883,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/OffHeapHashTableBuilderTest.java",
"chars": 2961,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/RandomDataGenerator.java",
"chars": 741,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/RecordTest.java",
"chars": 3126,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/RehashTest.java",
"chars": 1639,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/SegmentWithMemoryPoolTest.java",
"chars": 10873,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/SequenceNumberTest.java",
"chars": 5382,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/SyncWriteTest.java",
"chars": 3950,
"preview": "package com.oath.halodb;\n\nimport org.testng.Assert;\nimport org.testng.annotations.Test;\n\nimport java.io.IOException;\nimp"
},
{
"path": "src/test/java/com/oath/halodb/TestBase.java",
"chars": 2160,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/TestListener.java",
"chars": 1319,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/TestUtils.java",
"chars": 8407,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/TombstoneFileCleanUpTest.java",
"chars": 14566,
"preview": "package com.oath.halodb;\n\nimport org.testng.Assert;\nimport org.testng.annotations.Test;\n\nimport java.io.File;\nimport jav"
},
{
"path": "src/test/java/com/oath/halodb/TombstoneFileTest.java",
"chars": 6225,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/UnsTest.java",
"chars": 12971,
"preview": "/*\n * Copyright 2018, Oath Inc\n * Licensed under the terms of the Apache License 2.0. Please refer to accompanying LICEN"
},
{
"path": "src/test/java/com/oath/halodb/histo/EstimatedHistogramTest.java",
"chars": 3734,
"preview": "package com.oath.halodb.histo;\n\nimport org.testng.Assert;\nimport org.testng.annotations.Test;\nimport org.testng.internal"
},
{
"path": "src/test/resources/log4j2-test.xml",
"chars": 980,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n ~ Copyright 2018, Oath Inc\n ~ Licensed under the terms of the Apache Lice"
}
]
About this extraction
This page contains the full source code of the yahoo/HaloDB GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 117 files (575.4 KB, approximately 133.1k tokens) and a symbol index covering 1172 extracted functions, classes, methods, constants, and types. It can be used with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub repository-to-text converter for AI. Built by Nikandr Surkov.