Full Code of KentBeck/BPlusTree3 for AI

main ca80e4d85a99 cached
203 files
1.5 MB
378.3k tokens
1173 symbols
1 requests
Download .txt
Showing preview only (1,573K chars total). Download the full file or copy to clipboard to get everything.
Repository: KentBeck/BPlusTree3
Branch: main
Commit: ca80e4d85a99
Files: 203
Total size: 1.5 MB

Directory structure:
gitextract_q6j9thfa/

├── .claude/
│   └── system_prompt_additions.md
├── .devcontainer/
│   └── devcontainer.json
├── .github/
│   └── workflows/
│       ├── build-wheels.yml
│       ├── performance-tracking.yml
│       ├── python-ci.yml
│       ├── release.yml
│       └── rust-ci.yml
├── .gitignore
├── .vscode/
│   └── settings.json
├── Cargo.toml
├── LICENSE
├── README.md
├── agent.md
├── analyze_programming_time.py
├── arena_elimination_analysis.md
├── commits.txt
├── docs/
│   ├── adr/
│   │   └── ADR-003-compressed-node-limitations.md
│   ├── delete_operations_call_graph.md
│   ├── delete_optimization_plan.md
│   └── iteration_optimization_plan.md
├── python/
│   ├── CHANGELOG.md
│   ├── LICENSE
│   ├── MANIFEST.in
│   ├── README.md
│   ├── benchmarks/
│   │   └── performance_benchmark.py
│   ├── bplustree/
│   │   ├── __init__.py
│   │   └── bplus_tree.py
│   ├── bplustree_c_src/
│   │   ├── bplustree.h
│   │   ├── bplustree_module.c
│   │   ├── node_ops.c
│   │   └── tree_ops.c
│   ├── conftest.py
│   ├── coverage.xml
│   ├── docs/
│   │   ├── API_REFERENCE.md
│   │   ├── CAPACITY_OPTIMIZATION_ANALYSIS.md
│   │   ├── COMPETITIVE_ADVANTAGES.md
│   │   ├── C_EXTENSION_IMPROVEMENT_PLAN.md
│   │   ├── C_EXTENSION_SEGFAULT_FIX.md
│   │   ├── GA_READINESS_PLAN.md
│   │   ├── LOOKUP_PERFORMANCE_ANALYSIS.md
│   │   ├── OPTIMIZATION_RESULTS.md
│   │   ├── PERFORMANCE_HISTORY.md
│   │   ├── PERFORMANCE_OPTIMIZATION_PLAN.md
│   │   ├── README_benchmark.md
│   │   ├── STRUCTURAL_IMPROVEMENTS.md
│   │   ├── THREAD_SAFETY.md
│   │   ├── advanced_usage.md
│   │   ├── installation.md
│   │   ├── migration_guide.md
│   │   ├── performance_guide.md
│   │   ├── quickstart.md
│   │   └── troubleshooting.md
│   ├── examples/
│   │   ├── basic_usage.py
│   │   ├── migration_guide.py
│   │   ├── performance_demo.py
│   │   └── range_queries.py
│   ├── py.typed
│   ├── pyproject.toml
│   ├── setup.py
│   ├── tests/
│   │   ├── __init__.py
│   │   ├── _invariant_checker.py
│   │   ├── comprehensive_fuzz_test.py
│   │   ├── fuzz_test.py
│   │   ├── test_bplus_tree.py
│   │   ├── test_c_extension.py
│   │   ├── test_c_extension_comprehensive.py
│   │   ├── test_c_extension_segfault_fix.py
│   │   ├── test_compile_flags.py
│   │   ├── test_data_alignment.py
│   │   ├── test_dictionary_api.py
│   │   ├── test_docstyle.py
│   │   ├── test_fuzz_discovered_patterns.py
│   │   ├── test_gc_support.py
│   │   ├── test_gprof_harness.py
│   │   ├── test_import_error_fallback.py
│   │   ├── test_invariant_bug.py
│   │   ├── test_iterator.py
│   │   ├── test_iterator_modification_safety.py
│   │   ├── test_leak_detection.py
│   │   ├── test_max_occupancy_bug.py
│   │   ├── test_memory_leaks.py
│   │   ├── test_multithreaded_lookup.py
│   │   ├── test_no_segfaults.py
│   │   ├── test_node_split_minimal.py
│   │   ├── test_optimized_bplus_tree.py
│   │   ├── test_performance_baseline.py
│   │   ├── test_performance_benchmarks.py
│   │   ├── test_performance_regression.py
│   │   ├── test_performance_vs_sorteddict.py
│   │   ├── test_prefetch_microbench.py
│   │   ├── test_proper_deletion.py
│   │   ├── test_segfault_regression.py
│   │   ├── test_single_array_int_optimization.py
│   │   ├── test_single_child_parent.py
│   │   ├── test_stress_edge_cases.py
│   │   └── test_stress_large_datasets.py
│   └── tmp/
│       └── xcrun_db
├── rust/
│   ├── API_COMPLETION_ROADMAP.md
│   ├── API_COMPLETION_STATUS.md
│   ├── BTREEMAP_COMPARISON.md
│   ├── BTREE_ADVANTAGES.md
│   ├── Cargo.toml
│   ├── DELETE_PROFILING_REPORT.md
│   ├── ENTRY_API_TRADEOFFS.md
│   ├── HOTSPOT_ANALYSIS.md
│   ├── IMPLEMENTATION_ANALYSIS.md
│   ├── MEMORY_OPTIMIZATION_PLAN.md
│   ├── MEMORY_OPTIMIZATION_RESULTS.md
│   ├── MODULARIZATION_PLAN.md
│   ├── MODULARIZATION_PLAN_REVISED.md
│   ├── PERFORMANCE_ANALYSIS.md
│   ├── PERFORMANCE_LOG.md
│   ├── RANGE_SCAN_PROFILING_REPORT.md
│   ├── README.md
│   ├── RECOMMENDATIONS.md
│   ├── RUNTIME_PERFORMANCE_ANALYSIS.md
│   ├── benches/
│   │   ├── comparison.rs
│   │   ├── profiling_benchmark.rs
│   │   ├── quick_clone_bench.rs
│   │   └── range_scan_profiling.rs
│   ├── docs/
│   │   ├── BENCHMARK_RESULTS.md
│   │   ├── CLAUDE.md
│   │   ├── CODE_DUPLICATION_ANALYSIS.md
│   │   ├── COPY_PASTE_DETECTOR_SUMMARY.md
│   │   ├── FRESH_BENCHMARK_RESULTS_2025.md
│   │   ├── PERFORMANCE_BENCHMARKS.md
│   │   ├── PROJECT_STATUS.md
│   │   ├── RANGE_OPTIMIZATION_SUMMARY.md
│   │   ├── RANGE_QUERY_OPTIMIZATION_PLAN.md
│   │   ├── TEST_RELIABILITY_PLAN.md
│   │   ├── UPDATED_COPY_PASTE_ANALYSIS.md
│   │   ├── arena-allocation-learnings.md
│   │   ├── arena_migration_plan.md
│   │   ├── claude_refactoring.md
│   │   ├── code_coverage_analysis.md
│   │   ├── codex_refactoring.md
│   │   ├── concurrency_locking_strategies.md
│   │   ├── optimal_capacity_analysis.md
│   │   ├── parallel_vectors_vs_entries.md
│   │   └── rust_performance_history.md
│   ├── examples/
│   │   ├── comprehensive_comparison.rs
│   │   ├── find_optimal_capacity.rs
│   │   ├── quick_perf.rs
│   │   ├── range_syntax_demo.rs
│   │   └── readme_examples.rs
│   ├── focused_results/
│   │   └── custom_analysis.rs
│   ├── profiling_results/
│   │   ├── analysis_report.md
│   │   └── timing_analysis.rs
│   ├── src/
│   │   ├── bin/
│   │   │   ├── arena_profile.rs
│   │   │   ├── bound_check_test.rs
│   │   │   ├── delete_profiler.rs
│   │   │   ├── detailed_delete_profiler.rs
│   │   │   ├── function_profiler.rs
│   │   │   ├── instruments_delete_target.rs
│   │   │   ├── large_delete_benchmark.rs
│   │   │   ├── micro_range_bench.rs
│   │   │   ├── profile_functions.rs
│   │   │   ├── range_comparison.rs
│   │   │   └── range_profile.rs
│   │   ├── compact_arena.rs
│   │   ├── comprehensive_performance_benchmark.rs
│   │   ├── construction.rs
│   │   ├── delete_operations.rs
│   │   ├── detailed_iterator_analysis.rs
│   │   ├── error.rs
│   │   ├── get_operations.rs
│   │   ├── insert_operations.rs
│   │   ├── iteration.rs
│   │   ├── lib.rs
│   │   ├── macros.rs
│   │   ├── node.rs
│   │   ├── range_queries.rs
│   │   ├── tree_structure.rs
│   │   ├── types.rs
│   │   └── validation.rs
│   ├── tests/
│   │   ├── adversarial_arena_corruption.rs
│   │   ├── adversarial_branch_rebalancing.rs
│   │   ├── adversarial_edge_cases.rs
│   │   ├── adversarial_linked_list.rs
│   │   ├── bplus_tree.rs
│   │   ├── bug_reproduction_tests.rs
│   │   ├── critical_bug_test.rs
│   │   ├── debug_infinite_loop.rs
│   │   ├── enhanced_error_handling.rs
│   │   ├── error_handling_consistency.rs
│   │   ├── fuzz_tests.rs
│   │   ├── linked_list_corruption_detection.rs
│   │   ├── memory_leak_detection.rs
│   │   ├── memory_safety_audit.rs
│   │   ├── range_bounds_syntax.rs
│   │   ├── range_differential.rs
│   │   ├── remove_operations.rs
│   │   ├── simple_bug_tests.rs
│   │   ├── specific_bug_demos.rs
│   │   └── test_utils.rs
│   └── tools/
│       └── parse_time_profile.py
├── rust-toolchain.toml
├── scripts/
│   ├── analyze_benchmarks.py
│   ├── instruments_export.sh
│   └── precommit.sh
├── simple_time_analysis.py
├── test_coverage_analysis.md
└── visualize_programming_time.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .claude/system_prompt_additions.md
================================================
# System Prompt Additions for Code Quality

## Code Quality Standards

NEVER write production code that contains:

1. **panic!() statements in normal operation paths** - always return Result<T, Error>
2. **memory leaks** - every allocation must have corresponding deallocation
3. **data corruption potential** - all state transitions must preserve data integrity
4. **inconsistent error handling patterns** - establish and follow single pattern

ALWAYS:

1. **Write comprehensive tests BEFORE implementing features**
2. **Include invariant validation in data structures**
3. **Use proper bounds checking for numeric conversions**
4. **Document known bugs immediately and fix them before continuing**
5. **Implement proper separation of concerns**
6. **Use static analysis tools (clippy, miri) before considering code complete**

## Development Process Guards

### TESTING REQUIREMENTS:
- Write failing tests first, then implement to make them pass
- Never commit code with #[should_panic] for bugs - fix the bugs
- Include property-based testing for data structures
- Test memory usage patterns, not just functionality
- Validate all edge cases and boundary conditions

### ARCHITECTURE REQUIREMENTS:
- Explicit error handling - no hidden panics or unwraps
- Memory safety - all unsafe code must be justified and audited
- Performance conscious - avoid unnecessary allocations/clones
- API design - consistent patterns across all public interfaces

### REVIEW CHECKPOINTS:

Before marking any code complete, verify:

1. **No compilation warnings**
2. **All tests pass (including stress tests)**
3. **Memory usage is bounded and predictable**
4. **No data corruption potential in any code path**
5. **Error handling is comprehensive and consistent**
6. **Code is modular and maintainable**
7. **Documentation matches implementation**
8. **Performance benchmarks show acceptable results**

## Rust-Specific Quality Standards

### ERROR HANDLING:
- Use Result<T, Error> for all fallible operations
- Define comprehensive error enums with context
- Never use unwrap() in production code paths
- Use ? operator for error propagation
- Provide meaningful error messages

### MEMORY MANAGEMENT:
- Audit all allocations for corresponding deallocations
- Use RAII patterns consistently
- Prefer borrowing over cloning when possible
- Use Cow<T> for conditional cloning
- Test for memory leaks in long-running scenarios

### DATA STRUCTURE INVARIANTS:
- Document all invariants in comments
- Implement runtime validation (behind feature flags)
- Test invariant preservation across all operations
- Use type system to enforce invariants where possible
- Validate state consistency at module boundaries

### MODULE ORGANIZATION:
- Single responsibility per module
- Clear public/private API boundaries
- Comprehensive module documentation
- Logical dependency hierarchy

## Critical Patterns to Avoid

### DANGEROUS PATTERNS:
```rust
// NEVER DO THIS - production panic
panic!("This should never happen");

// NEVER DO THIS - unchecked conversion
let id = size as u32; // Can overflow on 64-bit

// NEVER DO THIS - ignoring errors
some_operation().unwrap();

// NEVER DO THIS - leaking resources
let resource = allocate();
// ... no corresponding deallocation
```

### PREFERRED PATTERNS:
```rust
// DO THIS - proper error handling
fn operation() -> Result<T, MyError> {
    match risky_operation() {
        Ok(value) => Ok(process(value)),
        Err(e) => Err(MyError::from(e)),
    }
}

// DO THIS - safe conversion
let id: u32 = size.try_into()
    .map_err(|_| Error::InvalidSize(size))?;

// DO THIS - explicit error handling
let result = some_operation()
    .map_err(|e| Error::OperationFailed(e))?;

// DO THIS - RAII resource management
struct ResourceManager {
    resource: Resource,
}

impl Drop for ResourceManager {
    fn drop(&mut self) {
        self.resource.cleanup();
    }
}
```

## Testing Standards

### COMPREHENSIVE TEST COVERAGE:
- Unit tests for all public functions
- Integration tests for complex interactions
- Property-based tests for data structures
- Stress tests for long-running operations
- Memory leak detection tests
- Edge case and boundary condition tests

### TEST ORGANIZATION:
```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_normal_operation() {
        // Test typical usage patterns
    }

    #[test]
    fn test_edge_cases() {
        // Test boundary conditions
    }

    #[test]
    fn test_error_conditions() {
        // Test all error paths
    }

    #[test]
    fn test_invariants_preserved() {
        // Verify data structure invariants
    }
}

#[cfg(test)]
mod property_tests {
    use proptest::prelude::*;

    proptest! {
        #[test]
        fn test_invariant_always_holds(input in any::<InputType>()) {
            let result = operation(input);
            assert!(check_invariant(&result));
        }
    }
}
```

### MEMORY TESTING:
```rust
#[test]
fn test_no_memory_leaks() {
    let initial_count = get_allocation_count();

    {
        let mut structure = DataStructure::new();
        // Perform operations that allocate/deallocate
        for i in 0..1000 {
            structure.insert(i);
        }
        for i in 0..500 {
            structure.remove(i);
        }
    } // structure dropped here

    let final_count = get_allocation_count();
    assert_eq!(initial_count, final_count, "Memory leak detected");
}
```

## Documentation Standards

### CODE DOCUMENTATION:
- Document all public APIs with examples
- Explain complex algorithms and data structures
- Document invariants and preconditions
- Include safety notes for unsafe code
- Provide usage examples in doc comments

### ERROR DOCUMENTATION:
```rust
/// Inserts a key-value pair into the tree.
///
/// # Arguments
/// * `key` - The key to insert (must implement Ord)
/// * `value` - The value to associate with the key
///
/// # Returns
/// * `Ok(old_value)` if key existed (returns old value)
/// * `Ok(None)` if key was newly inserted
/// * `Err(Error::InvalidKey)` if key violates constraints
///
/// # Examples
/// ```
/// let mut tree = BPlusTree::new(4)?;
/// assert_eq!(tree.insert(1, "value")?, None);
/// assert_eq!(tree.insert(1, "new")?, Some("value"));
/// ```
///
/// # Panics
/// Never panics - all error conditions return Result
///
/// # Safety
/// This function maintains all tree invariants
pub fn insert(&mut self, key: K, value: V) -> Result<Option<V>, Error> {
    // Implementation
}
```

This system prompt addition should prevent the types of critical issues identified in the code review by establishing clear quality standards, testing requirements, and architectural principles that must be followed for all code.


================================================
FILE: .devcontainer/devcontainer.json
================================================
// The Dev Container format allows you to configure your environment. At the heart of it
// is a Docker image or Dockerfile which controls the tools available in your environment.
//
// See https://aka.ms/devcontainer.json for more information.
{
	"name": "Gitpod",
	// This universal image (~10GB) includes many development tools and languages,
	// providing a convenient all-in-one development environment.
	//
	// This image is already available on remote runners for fast startup. On desktop
	// and linux runners, it will need to be downloaded, which may take longer.
	//
	// For faster startup on desktop/linux, consider a smaller, language-specific image:
	// • For Python: mcr.microsoft.com/devcontainers/python:3.11
	// • For Node.js: mcr.microsoft.com/devcontainers/javascript-node:18
	// • For Go: mcr.microsoft.com/devcontainers/go:1.21
	// • For Java: mcr.microsoft.com/devcontainers/java:17
	//
	// Browse more options at: https://hub.docker.com/r/microsoft/devcontainers
	// or build your own using the Dockerfile option below.
	"image": "mcr.microsoft.com/devcontainers/universal:3.0.3"
	// Use "build":
	// instead of the image to use a Dockerfile to build an image.
	// "build": {
	//     "context": ".",
	//     "dockerfile": "Dockerfile"
	// }
	// Features add additional features to your environment. See https://containers.dev/features
	// Beware: features are not supported on all platforms and may have unintended side-effects.
	// "features": {
	//   "ghcr.io/devcontainers/features/docker-in-docker": {
	//     "moby": false
	//   }
	// }
}


================================================
FILE: .github/workflows/build-wheels.yml
================================================
# CI workflow: builds the Python wheel and validates its metadata.
name: Build Wheels

# Triggers: version tags (v*), pull requests against main, and manual dispatch.
on:
  push:
    tags:
      - 'v*'
  pull_request:
    branches: [ main ]
  workflow_dispatch:

jobs:
  build-wheels:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.11'

    - name: Install build dependencies
      run: |
        python -m pip install --upgrade pip
        pip install build twine

    # The Python package lives in the python/ subdirectory of the repo.
    - name: Build wheel
      run: |
        cd python
        python -m build --wheel

    # `twine check` validates wheel metadata before any upload happens.
    - name: Check wheel
      run: |
        cd python
        twine check dist/*.whl

    # Keep built wheels available for download from the workflow run.
    - name: Upload wheels as artifacts
      uses: actions/upload-artifact@v4
      with:
        name: wheels
        path: python/dist/*.whl


================================================
FILE: .github/workflows/performance-tracking.yml
================================================
# Performance tracking workflow: runs the Python benchmark and regression
# suites on pushes to main, on a weekly schedule, and on manual dispatch,
# then archives the results as a workflow artifact.
name: Performance Tracking

on:
  push:
    branches: [ main ]
  schedule:
    # Run weekly on Sundays at 00:00 UTC
    - cron: '0 0 * * 0'
  workflow_dispatch:

jobs:
  performance:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.11'

    - name: Install dependencies
      run: |
        cd python
        pip install -e .[test,benchmark]

    # NOTE(review): the `|| echo ...` suffixes keep this job green even when a
    # benchmark fails or hits its timeout — presumably intentional so that
    # tracking runs never block the pipeline; confirm before tightening.
    - name: Run performance benchmarks
      run: |
        cd python
        echo "Running performance benchmarks..."
        timeout 10m python -m pytest tests/test_performance_benchmarks.py::TestPerformanceBenchmarks::test_insertion_performance_small -v --tb=short || echo "Performance benchmarks completed with issues"

        echo "Running performance regression tests..."
        timeout 10m python -m pytest tests/test_performance_regression.py -v --tb=short || echo "Performance regression tests completed with issues"

    # `if: always()` uploads whatever results exist even after a failure.
    - name: Archive performance results
      uses: actions/upload-artifact@v4
      with:
        name: performance-results
        path: python/performance_results.txt
      if: always()


================================================
FILE: .github/workflows/python-ci.yml
================================================
# Python CI: builds the C extension and runs the fast test suite plus a small
# set of critical reliability tests on pushes and PRs targeting main.
name: Python CI

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.11'

    - name: Install dependencies
      run: |
        cd python
        pip install -e .[test]

    # The C extension is opt-in via this env var; build it in-place so the
    # tests exercise the compiled code path.
    - name: Build C extension
      run: |
        cd python
        BPLUSTREE_BUILD_C_EXTENSION=1 python setup.py build_ext --inplace

    # -m "not slow" skips long-running tests; -x stops at the first failure.
    - name: Run fast tests
      run: |
        cd python
        python -m pytest tests/ -m "not slow" -x -v

    # Each critical test runs under `timeout` so a hang fails the step rather
    # than stalling the job; unlike the tracking workflow, failures here are
    # NOT swallowed — these tests gate the build.
    - name: Run critical reliability tests
      run: |
        cd python
        echo "Running memory leak test (CRITICAL)..."
        timeout 5m python -m pytest tests/test_memory_leaks.py::TestMemoryLeaks::test_insertion_deletion_cycle_no_leak -v --tb=short

        echo "Running performance regression test (CRITICAL)..."
        timeout 3m python -m pytest tests/test_performance_benchmarks.py::TestPerformanceBenchmarks::test_insertion_performance_small -v --tb=short

        echo "Running invariant stress test (CRITICAL)..."
        timeout 3m python -m pytest tests/test_bplus_tree.py::TestSetItemSplitting::test_many_insertions_maintain_invariants -v --tb=short

        echo "Running C extension segfault tests (CRITICAL)..."
        timeout 2m python -m pytest tests/test_c_extension_segfault_fix.py -v --tb=short


================================================
FILE: .github/workflows/release.yml
================================================
# Release workflow: on a version tag (v*), publishes the Rust crate to
# crates.io and the Python package to PyPI, then creates a GitHub release
# once both publish jobs succeed.
name: Release

on:
  push:
    tags:
      - 'v*'

jobs:
  publish-rust:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    # Use dtolnay/rust-toolchain, consistent with rust-ci.yml; the previously
    # used actions-rs/toolchain action is archived and unmaintained.
    - name: Set up Rust
      uses: dtolnay/rust-toolchain@stable

    - name: Build and test Rust crate
      run: |
        cd rust
        cargo build --release
        cargo test --release

    # Dry-run first to catch packaging problems before the real publish.
    - name: Publish to crates.io
      env:
        CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
      run: |
        cd rust
        cargo publish --dry-run
        cargo publish

  publish-python:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.11'

    - name: Install build dependencies
      run: |
        python -m pip install --upgrade pip
        pip install build twine

    # Builds both the wheel and the source distribution.
    - name: Build wheel and source distribution
      run: |
        cd python
        python -m build

    # --skip-existing makes re-runs of a tag idempotent on PyPI.
    - name: Publish to PyPI
      env:
        TWINE_USERNAME: __token__
        TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
      run: |
        cd python
        twine upload dist/* --skip-existing

  create-release:
    # Only create the GitHub release after both registries have the artifacts.
    needs: [publish-rust, publish-python]
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    # Tags containing alpha/beta/rc are marked as prereleases.
    - name: Create GitHub Release
      uses: softprops/action-gh-release@v1
      with:
        tag_name: ${{ github.ref_name }}
        name: Release ${{ github.ref_name }}
        draft: false
        prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') || contains(github.ref_name, 'rc') }}
        generate_release_notes: true


================================================
FILE: .github/workflows/rust-ci.yml
================================================
# Rust CI: formatting, lint, build, and test gates for pushes and PRs to main.
name: Rust CI

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Install Rust
      uses: dtolnay/rust-toolchain@stable

    # Fails the build on any formatting drift (no files are rewritten).
    - name: Check code formatting
      run: |
        cd rust
        cargo fmt --check

    # -D warnings promotes all clippy lints to hard errors.
    - name: Run clippy
      run: |
        cd rust
        cargo clippy -- -D warnings

    - name: Build
      run: |
        cd rust
        cargo build --verbose

    # NOTE(review): runs default-feature tests only; README suggests
    # `cargo test --features testing` locally — confirm whether CI should too.
    - name: Run tests
      run: |
        cd rust
        cargo test --verbose


================================================
FILE: .gitignore
================================================
# Generated by Cargo
# will have compiled files and executables
debug/
target/

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

# RustRover
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.claude/settings.local.json

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
.pytest_cache/
.coverage
htmlcov/
*.log
*.tmp
*~
.DS_Store
fuzz_failure_*.py
# Build artifacts
*.o
src/python/build/

# Python packaging and distribution
python/build/
python/dist/
python/*.egg-info/
python/wheelhouse/
*.whl
*.tar.gz

# Temporary analysis files
plot_commits_vs_duration.py
commits_vs_duration_analysis.png
rust/test_simple.rs
# Profiling artifacts (do not commit)
rust/delete_profile.trace/
rust/delete_time_profile.xml
rust/delete_time_sample.xml
*.trace


================================================
FILE: .vscode/settings.json
================================================
{
    "rust-analyzer.cargo.features": ["testing"],
    "rust-analyzer.checkOnSave.allFeatures": false,
    "rust-analyzer.checkOnSave.features": ["testing"]
}


================================================
FILE: Cargo.toml
================================================
# Workspace manifest: the Rust crate itself lives in rust/.
[workspace]
members = ["rust"]
resolver = "2"

# Shared package metadata inherited by workspace members.
[workspace.package]
version = "0.9.0"
authors = ["Kent Beck <kent@kentbeck.com>"]
license = "MIT"
repository = "https://github.com/KentBeck/BPlusTree3"
edition = "2021"

# Shared dependency versions inherited by workspace members.
[workspace.dependencies]
rand = "0.8"
criterion = { version = "0.5", features = ["html_reports"] }
paste = "1.0"

# Keep debug symbols in release builds — presumably so profilers (Instruments,
# criterion flamegraphs) can resolve symbols; TODO confirm. Binaries are
# larger as a result, but runtime performance is unaffected.
[profile.release]
debug = true

================================================
FILE: LICENSE
================================================
MIT License

Copyright (c) 2025 Kent Beck

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
# BPlusTree

High-performance B+ tree implementations for **Rust** and **Python**, designed for efficient range queries and sequential access patterns.

## 🚀 **Dual-Language Implementation**

This project provides **complete, optimized B+ tree implementations** in both languages:

- **🦀 [Rust Implementation](./rust/)** - Zero-cost abstractions, arena-based memory management
- **🐍 [Python Implementation](./python/)** - Competitive with SortedDict, optimized for specific use cases

## 📊 **Performance Highlights**

### **Rust Implementation**

- **32-68% faster range scans** than std::BTreeMap (1.5-2.8x throughput)
- **23-68% faster GET operations** across all dataset sizes
- **2-22% faster insertions** with excellent scaling
- **Trade-off: 34% slower deletes** in optimized scenarios

### **Python Implementation**

- **Up to 2.5x faster** than SortedDict for partial range scans
- **1.4x faster** for medium range queries
- **Excellent scaling** for large dataset iteration

## 🎯 **Choose Your Implementation**

| Use Case                          | Rust                      | Python                        |
| --------------------------------- | ------------------------- | ----------------------------- |
| **Systems programming**           | ✅ Primary choice         | ❌                            |
| **High-performance applications** | ✅ Zero-cost abstractions | ⚠️ Good for specific patterns |
| **Database engines**              | ✅ Full control           | ⚠️ Limited                    |
| **Data analytics**                | ✅ Fast                   | ✅ Great for range queries    |
| **Rapid prototyping**             | ⚠️ Learning curve         | ✅ Easy integration           |
| **Existing Python codebase**      | ❌                        | ✅ Drop-in replacement        |

## 🚀 **Quick Start**

### Rust

```rust
use bplustree::BPlusTreeMap;

let mut tree = BPlusTreeMap::new(16).unwrap();
tree.insert(1, "one");
tree.insert(2, "two");

// Range queries with Rust syntax!
for (key, value) in tree.range(1..=2) {
    println!("{}: {}", key, value);
}
```

### Python

```python
from bplustree import BPlusTree

tree = BPlusTree(capacity=128)
tree[1] = "one"
tree[2] = "two"

# Range queries
for key, value in tree.range(1, 2):
    print(f"{key}: {value}")
```

## 📖 **Documentation**

- **📚 [Technical Documentation](./rust/docs/)** - Architecture, algorithms, benchmarks
- **🦀 [Rust Documentation](./rust/README.md)** - Rust-specific usage and examples
- **🐍 [Python Documentation](./python/README.md)** - Python-specific usage and examples

## Performance Characteristics

**BPlusTreeMap demonstrates significant performance advantages in range operations and read-heavy workloads compared to Rust's standard BTreeMap.** Comprehensive benchmarking across dataset sizes from 1K to 10M entries reveals that BPlusTreeMap consistently outperforms BTreeMap in range scans by 32-68%, delivering 1.5-2.8x higher throughput (67K-212K vs 44K-83K items/ms). GET operations show similarly strong advantages, with BPlusTreeMap performing 23-68% faster across all scales, making it particularly well-suited for read-heavy applications and analytical workloads.

**Insert performance is competitive to superior, with BPlusTreeMap showing 2-22% faster insertion speeds depending on dataset size and configuration.** The implementation scales exceptionally well, with larger datasets (>1M entries) showing the most pronounced advantages. However, delete operations represent the primary trade-off, with BPlusTreeMap performing 34% slower in optimized scenarios and 1.7-10.5x slower depending on capacity configuration, particularly at high capacities (1024+ elements per node).

**Capacity configuration is critical for optimal performance.** The B+ tree implementation allows tuning of node capacity, with optimal settings varying by use case: capacity 64-128 for datasets under 10K entries, 128-256 for medium datasets (10K-100K), and 256-512 for large datasets (100K-1M+). Proper configuration can achieve near-optimal performance across all operations, while misconfiguration (particularly high capacities with delete-heavy workloads) can significantly impact performance.

**BPlusTreeMap is recommended for range-heavy workloads (>20% range scans), read-heavy applications (>60% gets), large dataset analytics, and mixed workloads with light-to-moderate delete operations (<15% deletes).** Standard BTreeMap remains preferable for delete-heavy workloads, small datasets with unknown access patterns, or applications requiring zero configuration. The performance characteristics make BPlusTreeMap particularly valuable for database-like applications, time-series analysis, and any scenario where range queries and sequential access patterns dominate.

## 🏗️ **Architecture**

Both implementations share core design principles:

- **Arena-based memory management** for efficiency
- **Linked leaf nodes** for fast sequential access
- **Hybrid navigation** combining tree traversal + linked list iteration
- **Optimized rebalancing** with reduced duplicate lookups
- **Comprehensive testing** including adversarial test patterns

## 🛠️ **Development**

### Rust Development

```bash
cd rust/
cargo test --features testing
cargo bench
```

### Python Development

```bash
cd python/
pip install -e .
python -m pytest tests/
```

### Cross-Language Benchmarking

```bash
python scripts/analyze_benchmarks.py
```

## 🤝 **Contributing**

This project follows **Test-Driven Development** and **Tidy First** principles:

1. **Write tests first** - All features start with failing tests
2. **Small, focused commits** - Separate structural and behavioral changes
3. **Comprehensive validation** - Both implementations tested against reference implementations
4. **Performance awareness** - All changes benchmarked for performance impact

## 📄 **License**

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## 🔗 **Links**

- **[GitHub Repository](https://github.com/KentBeck/BPlusTree3)**
- **[Rust Crate](https://crates.io/crates/bplustree)** _(coming soon)_
- **[Python Package](https://pypi.org/project/bplustree/)** _(coming soon)_

---

> Built with ❤️ following Kent Beck's **Test-Driven Development** methodology.


================================================
FILE: agent.md
================================================
# Engineering Conventions for BPlusTree3

- No feature flags for internal experiments. We have no external users, so avoid `#[cfg(feature = ...)]` branches. Implement improvements directly (or in short‑lived local branches) and remove experimental code before merging.

- Performance work
  - Validate with existing Criterion benches and the large delete runner (`rust/src/bin/large_delete_benchmark.rs`).
  - For line‑level CPU hotspots, use the Instruments workload (`rust/src/bin/instruments_delete_target.rs`) and store traces under `rust/delete_profile.trace` (not committed).
  - Prefer targeted, localized changes that don’t regress insert/get/range performance.

- Coding style
  - Keep changes minimal and focused on the stated goal.
  - Reduce repeated arena lookups and redundant separator/key reads in hot paths.
  - Favor bulk moves and pre‑allocation over per‑element operations.

- Benchmarks to run for delete changes
  - `cd rust && cargo bench --bench comparison deletion`
  - `cd rust && cargo run --release --bin large_delete_benchmark`
  - Optional: record Instruments trace for confirmation of hotspot reductions.

- Hygiene before commit
  - Always remove dead code introduced by refactors.
  - Delete code as soon as it is dead.
  - Always format the workspace: `cd rust && cargo fmt --all`.
  - Always run all tests: `cargo test --workspace` (and benches if relevant).


================================================
FILE: analyze_programming_time.py
================================================
#!/usr/bin/env python3
"""
Analyze programming time based on commit patterns.
Calculate time gaps between commits and visualize the results.
"""

import re
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime, timedelta
import pandas as pd
from collections import defaultdict


def parse_git_log(log_output):
    """Convert raw `git log` output (hash|date|message lines) into commit dicts.

    Lines without a '|' or with fewer than three fields are skipped; lines
    whose date cannot be parsed are reported and skipped. The returned list
    is sorted chronologically (oldest first).
    """
    parsed = []
    for raw_line in log_output.strip().split("\n"):
        if "|" not in raw_line:
            continue
        pieces = raw_line.split("|", 2)
        if len(pieces) < 3:
            continue
        sha, raw_date, msg = pieces
        try:
            # Expected date format: 2025-06-08 14:56:12 -0700
            when = datetime.strptime(raw_date.strip(), "%Y-%m-%d %H:%M:%S %z")
        except ValueError as err:
            print(f"Error parsing date '{raw_date}': {err}")
            continue
        parsed.append(
            {
                "hash": sha,
                "datetime": when,
                "message": msg,
                "date_str": raw_date.strip(),
            }
        )

    # Chronological order, oldest first.
    parsed.sort(key=lambda commit: commit["datetime"])
    return parsed


def calculate_programming_sessions(commits, max_gap_minutes=120):
    """
    Group commits into work sessions based on the gaps between them.
    Consecutive commits no more than `max_gap_minutes` apart are treated
    as one continuous session; a larger gap starts a new session.
    """
    if not commits:
        return []

    def fresh_session(commit):
        # A session initially starts and ends at a single commit.
        return {
            "start": commit["datetime"],
            "end": commit["datetime"],
            "commits": [commit],
            "duration_minutes": 0,
        }

    sessions = []
    active = fresh_session(commits[0])

    for previous, current in zip(commits, commits[1:]):
        gap_minutes = (
            current["datetime"] - previous["datetime"]
        ).total_seconds() / 60

        if gap_minutes <= max_gap_minutes:
            # Extend the active session to cover this commit.
            active["end"] = current["datetime"]
            active["commits"].append(current)
            active["duration_minutes"] = (
                active["end"] - active["start"]
            ).total_seconds() / 60
        else:
            # Gap too large: close the active session and open a new one.
            sessions.append(active)
            active = fresh_session(current)

    # The final session is still open; record it.
    sessions.append(active)

    return sessions


def analyze_daily_programming(sessions):
    """Aggregate sessions into per-day totals, keyed by each session's start date."""
    totals = {}
    for session in sessions:
        day = session["start"].date()
        entry = totals.setdefault(
            day, {"duration_minutes": 0, "sessions": 0, "commits": 0}
        )
        entry["duration_minutes"] += session["duration_minutes"]
        entry["sessions"] += 1
        entry["commits"] += len(session["commits"])

    return totals


def create_visualizations(sessions, daily_data):
    """Render a 2x2 dashboard of programming-time charts, save it as a PNG,
    and display it."""

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 12))
    fig.suptitle(
        "Programming Time Analysis for BPlusTree Repository",
        fontsize=16,
        fontweight="bold",
    )

    ordered_dates = sorted(daily_data.keys())

    # 1. Hours of programming per day.
    hours_per_day = [daily_data[d]["duration_minutes"] / 60 for d in ordered_dates]
    ax1.bar(ordered_dates, hours_per_day, alpha=0.7, color="steelblue")
    ax1.set_title("Daily Programming Time (Hours)")
    ax1.set_ylabel("Hours")
    ax1.tick_params(axis="x", rotation=45)
    ax1.grid(True, alpha=0.3)

    # 2. Each session plotted at its start time against its length.
    starts = [s["start"] for s in sessions]
    lengths = [s["duration_minutes"] / 60 for s in sessions]
    ax2.scatter(starts, lengths, alpha=0.6, color="orange", s=50)
    ax2.set_title("Programming Sessions Timeline")
    ax2.set_ylabel("Session Duration (Hours)")
    ax2.tick_params(axis="x", rotation=45)
    ax2.grid(True, alpha=0.3)

    # 3. Commit counts per day.
    commits_per_day = [daily_data[d]["commits"] for d in ordered_dates]
    ax3.bar(ordered_dates, commits_per_day, alpha=0.7, color="green")
    ax3.set_title("Commits per Day")
    ax3.set_ylabel("Number of Commits")
    ax3.tick_params(axis="x", rotation=45)
    ax3.grid(True, alpha=0.3)

    # 4. Histogram of session lengths (zero-length sessions excluded).
    nonzero_lengths = [
        s["duration_minutes"] / 60 for s in sessions if s["duration_minutes"] > 0
    ]
    ax4.hist(nonzero_lengths, bins=20, alpha=0.7, color="purple", edgecolor="black")
    ax4.set_title("Session Duration Distribution")
    ax4.set_xlabel("Session Duration (Hours)")
    ax4.set_ylabel("Frequency")
    ax4.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig("programming_time_analysis.png", dpi=300, bbox_inches="tight")
    plt.show()


def print_summary(sessions, daily_data):
    """Print aggregate statistics, the five busiest days, and the five
    longest sessions to stdout.

    Assumes `sessions` and `daily_data` are non-empty (the averages divide
    by their lengths).
    """
    total_minutes = sum(s["duration_minutes"] for s in sessions)
    total_hours = total_minutes / 60
    total_commits = sum(len(s["commits"]) for s in sessions)

    banner = "=" * 60
    print(banner)
    print("PROGRAMMING TIME ANALYSIS SUMMARY")
    print(banner)
    print(
        f"Total Programming Time: {total_hours:.1f} hours ({total_minutes:.0f} minutes)"
    )
    print(f"Total Commits: {total_commits}")
    print(f"Total Sessions: {len(sessions)}")
    print(f"Average Session Length: {total_minutes/len(sessions):.1f} minutes")
    print(f"Programming Days: {len(daily_data)}")
    print(f"Average Hours per Day: {total_hours/len(daily_data):.1f} hours")
    print()

    # Five busiest days, ranked by total minutes worked.
    days_by_duration = sorted(
        daily_data.items(), key=lambda item: item[1]["duration_minutes"], reverse=True
    )
    print("TOP 5 PROGRAMMING DAYS:")
    for day, stats in days_by_duration[:5]:
        day_hours = stats["duration_minutes"] / 60
        print(
            f"  {day}: {day_hours:.1f} hours ({stats['commits']} commits, {stats['sessions']} sessions)"
        )
    print()

    # Five longest individual sessions.
    sessions_by_length = sorted(
        sessions, key=lambda s: s["duration_minutes"], reverse=True
    )
    print("LONGEST PROGRAMMING SESSIONS:")
    for rank, session in enumerate(sessions_by_length[:5], 1):
        session_hours = session["duration_minutes"] / 60
        started = session["start"].strftime("%Y-%m-%d %H:%M")
        print(
            f"  {rank}. {started}: {session_hours:.1f} hours ({len(session['commits'])} commits)"
        )


def main():
    """Entry point: gather commit history, derive sessions, report, and plot.

    Tries `git log` in the current directory first; if that fails for any
    reason (git missing, not a repository, ...), falls back to a small
    hardcoded sample so the analysis can still be demonstrated.
    """
    # Read git log data from file or use command output
    try:
        # Try to get fresh git log data
        import subprocess

        result = subprocess.run(
            ["git", "log", "--pretty=format:%H|%ad|%s", "--date=iso", "--all"],
            capture_output=True,
            text=True,
            cwd=".",
        )
        if result.returncode == 0:
            git_log_output = result.stdout
        else:
            raise RuntimeError("Git command failed")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt and SystemExit
        # still propagate instead of silently falling back to stale data.
        git_log_output = """f94aa9479bba269ffa10dae4098b94fea8d0c86a|2025-06-08 14:56:12 -0700|feat: implement complete dictionary API for Python B+ Tree
1cde4ca8a86d3f1ddc6bba2033dde06600a65eca|2025-06-08 14:49:21 -0700|fix: resolve critical segfaults in C extension
b31b6b75955dba7608ea0faa116aba32014eb9c4|2025-06-08 13:19:24 -0700|style: apply code formatting to Rust implementation
150515273ea331ebe68c9fea15d6b6c7795d4494|2025-06-08 13:19:11 -0700|docs: add comprehensive GA readiness plan for Python implementation
e1f539e238077bfb1cdc72ee2adeeaf12febc780|2025-06-08 10:18:36 -0700|refactor: reorganize project structure for dual-language implementation
79a19eee2a4dac5c5574f79c895af8db58c92db6|2025-06-08 09:49:15 -0700|docs: add performance benchmark charts demonstrating optimization impact
054d1bd1db709e91525c2bd691c2a8cfc4bddf03|2025-06-08 09:48:06 -0700|Merge pull request #6 from KentBeck/feature/fuzz-testing-and-benchmarks"""

    # Parse commits
    commits = parse_git_log(git_log_output)

    if not commits:
        print("No commits found to analyze!")
        return

    # Calculate programming sessions (assuming gaps > 2 hours indicate breaks)
    sessions = calculate_programming_sessions(commits, max_gap_minutes=120)

    # Analyze daily data
    daily_data = analyze_daily_programming(sessions)

    # Print summary
    print_summary(sessions, daily_data)

    # Create visualizations
    create_visualizations(sessions, daily_data)


================================================
FILE: arena_elimination_analysis.md
================================================
# Fundamental Challenges of Eliminating Arena-Based Allocation in Rust B+ Tree Implementations

## Executive Summary

Arena-based allocation in the current BPlusTreeMap implementation creates **1.68x iteration overhead** compared to Rust's standard BTreeMap. This analysis examines the fundamental challenges of eliminating arena allocation while maintaining Rust's memory safety guarantees, and evaluates alternative approaches including Box-based allocation, Rc/RefCell, unsafe pointers, and generational indices.

## Current Arena Implementation Analysis

### Performance Baseline
- **Iteration overhead**: 35.61 ns per item vs BTreeMap
- **Memory overhead**: 112 bytes struct size vs 24 bytes for BTreeMap  
- **Cache behavior**: 7.08x slower for small ranges due to indirection
- **Lookup performance**: Actually 5% faster than BTreeMap for random access

### Core Architecture
```rust
pub struct BPlusTreeMap<K, V> {
    capacity: usize,
    root: NodeRef<K, V>,
    leaf_arena: Arena<LeafNode<K, V>>,      // Separate arena for leaves
    branch_arena: Arena<BranchNode<K, V>>,  // Separate arena for branches
}

pub enum NodeRef<K, V> {
    Leaf(NodeId, PhantomData<(K, V)>),      // NodeId = u32 index
    Branch(NodeId, PhantomData<(K, V)>),
}
```

### Fundamental Arena Challenges

#### 1. **Indirection Overhead**
Every node access requires:
1. Convert `NodeId` (u32) to `usize`
2. Index into `Vec<Option<T>>`  
3. Unwrap `Option` to access actual node
4. Potential cache miss from non-contiguous storage

#### 2. **Iterator Complexity**
```rust
pub struct ItemIterator<'a, K, V> {
    tree: &'a BPlusTreeMap<K, V>,
    current_leaf_id: Option<NodeId>,        // Requires arena lookup
    current_leaf_index: usize,
    // ... additional state
}
```
Each `next()` call involves arena access + linked list traversal vs BTreeMap's direct pointer chasing.

#### 3. **Memory Fragmentation**
- Arena slots can become fragmented after deletions
- `Vec<Option<T>>` wastes memory on `None` values
- Cannot shrink arena without invalidating existing NodeIds

## Alternative Approaches Analysis

### 1. Box-Based Direct Allocation

#### Approach
```rust
pub enum Node<K, V> {
    Leaf(Box<LeafNode<K, V>>),
    Branch(Box<BranchNode<K, V>>),
}

pub struct LeafNode<K, V> {
    keys: Vec<K>,
    values: Vec<V>,
    next: Option<Box<LeafNode<K, V>>>,  // Direct pointer instead of NodeId
}
```

#### Advantages
- **Zero indirection**: Direct heap pointers
- **Optimal cache behavior**: Each node is contiguous in memory
- **Automatic memory management**: Drop trait handles cleanup
- **Smaller memory footprint**: No arena overhead

#### Challenges
- **Borrowing conflicts**: Cannot hold mutable reference to parent while accessing child
- **Self-referential structures**: Rust's ownership prevents cycles
- **Split operations**: Difficult to return new nodes while maintaining tree structure
- **Iterator invalidation**: Mutable operations can invalidate iterators

#### Critical Borrowing Issue
```rust
// This fails to compile:
fn split_leaf(&mut self, leaf: &mut LeafNode<K, V>) -> Box<LeafNode<K, V>> {
    let new_leaf = leaf.split();  // Needs &mut self for allocation
    self.update_parent_pointers(); // Borrowing conflict!
    new_leaf
}
```

#### Verdict
**Impractical** - Rust's borrowing rules make tree mutations extremely difficult without unsafe code.

### 2. Rc/RefCell Interior Mutability

#### Approach
```rust
type NodePtr<K, V> = Rc<RefCell<Node<K, V>>>;

pub struct BPlusTreeMap<K, V> {
    root: NodePtr<K, V>,
}

pub enum Node<K, V> {
    Leaf {
        keys: Vec<K>,
        values: Vec<V>, 
        next: Option<NodePtr<K, V>>,
    },
    Branch {
        keys: Vec<K>,
        children: Vec<NodePtr<K, V>>,
    },
}
```

#### Advantages
- **Shared ownership**: Multiple references to same node
- **Interior mutability**: Can mutate through shared references
- **Reference cycles**: Supports parent-child relationships
- **Familiar patterns**: Similar to other languages' approaches

#### Challenges
- **Runtime borrow checking**: `RefCell` panics on borrow violations
- **Performance overhead**: Reference counting + runtime checks
- **Memory leaks**: Potential cycles prevent automatic cleanup
- **Complex error handling**: Runtime panics vs compile-time safety

#### Performance Analysis
```rust
// Each node access requires:
let node = node_ptr.borrow();  // Runtime borrow check
match &*node {                 // Deref + pattern match
    Node::Leaf { keys, .. } => { /* access */ }
}
// Automatic drop of borrow guard
```

**Estimated overhead**: 20-40% slower than arena due to:
- Reference counting operations
- Runtime borrow checking
- Additional indirection through RefCell

#### Verdict
**Possible but suboptimal** - Trades compile-time safety for runtime overhead and complexity.

### 3. Unsafe Raw Pointers

#### Approach
```rust
pub struct BPlusTreeMap<K, V> {
    root: *mut Node<K, V>,
    _phantom: PhantomData<(K, V)>,
}

pub enum Node<K, V> {
    Leaf {
        keys: Vec<K>,
        values: Vec<V>,
        next: *mut Node<K, V>,  // Raw pointer
    },
    Branch {
        keys: Vec<K>, 
        children: Vec<*mut Node<K, V>>,
    },
}
```

#### Advantages
- **Maximum performance**: Direct pointer access, no overhead
- **Full control**: Can implement any tree operation
- **Memory efficiency**: Minimal memory overhead
- **Flexibility**: Can optimize for specific use cases

#### Challenges
- **Memory safety**: Manual memory management required
- **Use-after-free**: Dangling pointers after node deletion
- **Double-free**: Potential double deletion bugs
- **Iterator safety**: Iterators can become invalid
- **Maintenance burden**: Complex unsafe code is hard to verify

#### Safety Requirements
```rust
unsafe impl<K, V> Send for BPlusTreeMap<K, V> 
where K: Send, V: Send {}

unsafe impl<K, V> Sync for BPlusTreeMap<K, V> 
where K: Sync, V: Sync {}

impl<K, V> Drop for BPlusTreeMap<K, V> {
    fn drop(&mut self) {
        unsafe {
            // Must manually traverse and free all nodes
            self.free_subtree(self.root);
        }
    }
}
```

#### Verdict
**High-performance but risky** - Requires extensive unsafe code and careful verification. Only suitable for performance-critical applications with expert developers.

### 4. Generational Indices (SlotMap Pattern)

#### Approach
```rust
use slotmap::{SlotMap, DefaultKey};

pub struct BPlusTreeMap<K, V> {
    nodes: SlotMap<DefaultKey, Node<K, V>>,
    root: DefaultKey,
}

pub enum Node<K, V> {
    Leaf {
        keys: Vec<K>,
        values: Vec<V>,
        next: Option<DefaultKey>,  // Generational index
    },
    Branch {
        keys: Vec<K>,
        children: Vec<DefaultKey>,
    },
}
```

#### Advantages
- **Memory safety**: Automatic detection of stale references
- **ABA problem solved**: Generational versioning prevents reuse issues
- **Stable references**: Keys remain valid across operations
- **Efficient storage**: Packed storage with O(1) access
- **Mature implementation**: Well-tested SlotMap crate

#### Challenges
- **Similar overhead to arena**: Still requires indirection
- **External dependency**: Adds crate dependency
- **Key size**: 64-bit keys vs 32-bit NodeIds
- **Limited improvement**: May not solve core performance issues

#### Performance Comparison
```rust
// Arena access:
let node = self.leaf_arena.get(node_id)?;  // Vec index + Option unwrap

// SlotMap access:  
let node = self.nodes.get(key)?;           // Similar Vec index + generation check
```

**Expected performance**: Similar to current arena implementation, possibly 5-10% slower due to generation checking.

#### Verdict
**Incremental improvement** - Provides better safety guarantees but doesn't address fundamental iteration performance issues.

## Hybrid Approaches

### 1. Box + Arena Hybrid
```rust
pub struct BPlusTreeMap<K, V> {
    root: Box<Node<K, V>>,
    // Keep arena for temporary storage during splits
    temp_arena: Arena<Node<K, V>>,
}
```

Use Box for normal tree structure, arena only during complex operations.

### 2. Unsafe + Safe Interface
```rust
pub struct BPlusTreeMap<K, V> {
    inner: UnsafeTree<K, V>,  // Raw pointers internally
}

impl<K, V> BPlusTreeMap<K, V> {
    pub fn get(&self, key: &K) -> Option<&V> {
        // Safe wrapper around unsafe implementation
        unsafe { self.inner.get(key) }
    }
}
```

Encapsulate unsafe implementation behind safe API.

### 3. Copy-on-Write Optimization
```rust
pub enum Node<K, V> {
    Owned(Box<NodeData<K, V>>),
    Borrowed(&'static NodeData<K, V>),  // For read-heavy workloads
}
```

Optimize for read-heavy scenarios with immutable sharing.

## Performance Projections

Based on analysis and benchmarking:

| Approach | Iteration Speed | Memory Usage | Safety | Complexity |
|----------|----------------|--------------|---------|------------|
| **Current Arena** | 1.68x slower | High | Safe | Medium |
| **Box-based** | ~1.0x (ideal) | Low | Compile issues | High |
| **Rc/RefCell** | 1.3-1.5x slower | Medium | Runtime panics | Medium |
| **Unsafe pointers** | 0.8-1.0x | Minimal | Manual | Very High |
| **SlotMap** | 1.6-1.8x slower | Medium | Safe | Low |

## Recommendations

### Short-term (Incremental Improvements)
1. **Arena optimization**: 
   - Use `Vec<T>` instead of `Vec<Option<T>>` with separate free list
   - Implement arena compaction to improve cache locality
   - Pre-allocate arena capacity based on expected tree size

2. **Iterator optimization**:
   - Cache leaf node references to reduce arena lookups
   - Implement iterator pooling to reduce allocation overhead
   - Add fast-path for sequential iteration

### Medium-term (Architectural Changes)
1. **Hybrid approach**: Use Box for leaf nodes (better iteration), arena for branch nodes (easier mutations)
2. **Specialized iterators**: Different iterator implementations for different use cases
3. **Memory layout optimization**: Pack related nodes together in memory

### Long-term (Fundamental Redesign)
1. **Unsafe core with safe wrapper**: Maximum performance with safety guarantees
2. **Pluggable allocation strategies**: Allow users to choose allocation method
3. **SIMD optimization**: Vectorized operations for large-scale iteration

## Conclusion

Eliminating arena-based allocation in Rust B+ trees faces fundamental challenges due to Rust's ownership system. While alternatives exist, each involves significant trade-offs:

- **Box-based allocation** is theoretically optimal but practically impossible due to borrowing conflicts
- **Rc/RefCell** provides flexibility but adds runtime overhead and complexity  
- **Unsafe pointers** offer maximum performance but require extensive verification
- **Generational indices** improve safety but don't address core performance issues

The **most practical approach** is incremental optimization of the existing arena system combined with specialized optimizations for iteration-heavy workloads. For applications requiring maximum performance, a carefully designed unsafe core with safe wrappers may be justified, but this requires significant development and verification effort.

The current arena-based approach, while not optimal for iteration, provides a good balance of safety, performance, and maintainability for most use cases. The 1.68x iteration overhead is acceptable given the benefits in insertion/deletion performance and memory safety guarantees.


================================================
FILE: commits.txt
================================================
2025-05-20 Initial commit
2025-05-20 test: verify new tree reports empty
2025-05-21 Merge pull request #1 from KentBeck/codex/implement-stub-apis-for-bplustree
2025-05-21 Add CLAUDE.md with TDD and Tidy First development guidelines
2025-05-21 Add branching factor and basic insert functionality
2025-05-21 Implement get method for BPlusTree
2025-05-21 Split get method tests for better isolation
2025-05-21 Refactor tree operations to delegate to LeafNode
2025-05-21 Add array storage for LeafNode entries
2025-05-21 Maintain sorted order in LeafNode items array
2025-05-21 Add range and slice operations to retrieve sorted entries
2025-05-21 Remove BTreeMap dependency in LeafNode implementation
2025-05-21 Refactor insert with helper function and add comprehensive tests
2025-05-21 Implement node splitting with linked list of leaves
2025-05-21 Add test for multiple inserts with non-sequential keys
2025-05-21 Add LeafFinder utility to optimize tree traversal
2025-05-21 Simplify LeafFinder with safe, recursive implementation
2025-05-21 Implement LeafFinder for arbitrary-length chains
2025-05-21 Make find_leaf_mut iterative to match find_leaf
2025-05-21 Simplify find_leaf_mut with elegant recursion
2025-05-21 Add explanatory comment for recursive find_leaf_mut
2025-05-21 Implement node splitting at any position in leaf chain
2025-05-21 Simplify insertion logic by checking fullness before inserting
2025-05-21 Inline insert method for simplicity
2025-05-21 Add is_full method to LeafNode
2025-05-21 Remove redundant root splitting code from insert
2025-05-21 Invert insertion logic for clarity
2025-05-22 Simplify splitting logic to only split the one full leaf
2025-05-22 Inline splitting logic directly into insert method
2025-05-22 Move node linking logic into split method
2025-05-22 Fix insertion bug after splitting
2025-05-22 comment
2025-05-22 Add comprehensive fuzz tests for B+ tree
2025-05-22 Add timed fuzz test with configurable duration
2025-05-22 Refactor LeafNode insertion logic for better code organization
2025-05-22 Don't re-search the whole list
2025-05-22 Cleanup
2025-05-22 Comment
2025-05-23 Useless comments
2025-05-23 comment
2025-05-23 Structural: Move fuzz tests to dedicated file
2025-05-23 Structural: Exclude fuzz tests from ordinary test runs
2025-05-23 Add comprehensive README with API documentation and fuzz test instructions
2025-05-23 Structural: Add prev field to LeafNode for future remove operations
2025-05-23 Add remove infrastructure for LeafNode operations
2025-05-23 Add rebalancing operations for LeafNode
2025-05-23 Refactor: Split remove infrastructure test into focused unit tests
2025-05-23 Implement basic BPlusTree::remove method
2025-05-23 Implement underflow handling for remove operations
2025-05-23 Remove unused methods to clean up warnings
2025-05-23 Add comprehensive tree validation function and integrate into tests
2025-05-26 Complete Step 6: Add comprehensive edge case tests for remove operations
2025-05-26 Remove unused prev field from LeafNode
2025-05-26 Move integration tests to tests/ directory following Rust conventions
2025-05-26 Improve Reading Order: Move BPlusTree public API to top of lib.rs
2025-05-26 docs: improve documentation for leaf_count and leaf_sizes methods
2025-05-26 refactor: rename 'root' field to 'leaves' for clarity
2025-05-26 docs: update plan for BranchNode implementation focusing on get & insert
2025-05-26 docs: add comprehensive test case lists for insertion & removal
2025-05-26 docs: update TDD approach to emphasize generalization after tests pass
2025-05-26 feat: implement Node trait and BranchNode structure (Step 1)
2025-05-26 ignore
2025-05-26 feat: implement LeafFinder with BranchNode support
2025-05-26 feat: implement BranchNode key navigation (Step 4)
2025-05-26 Dead code dead
2025-05-27 cleanup
2025-05-27 feat: add Python B+ tree implementation with dict-like API
2025-05-27 Leaves & root
2025-05-27 feat: implement LeafFinder path tracking and fix insertion bug (Step 2)
2025-05-27 feat: add ABC imports to Python BPlusTree implementation
2025-05-27 refactor: simplify __contains__ method in BPlusTreeMap
2025-05-27 feat: implement leaf node splitting in Python B+ tree
2025-05-27 feat: implement root promotion from LeafNode to BranchNode
2025-05-27 fix: correct key_count method to handle None next pointer
2025-05-27 feat: generalize __setitem__ to handle both leaf and branch root cases
2025-05-27 refactor: simplify code and add invariants checking for correctness
2025-05-27 test: add invariant checks to all tree-level tests
2025-05-27 refactor: swap if/else branches for better readability
2025-05-27 refactor: remove unused _size field and simplify insertion logic
2025-05-27 feat: implement parent node splitting for B+ tree
2025-05-28 refactor: convert __setitem__ to recursive implementation
2025-05-28 refactor: remove redundant insert_pos variable
2025-05-28 refactor: rename result to split_result for clarity
2025-05-28 refactor: remove unnecessary else after return
2025-05-28 feat: implement basic deletion from leaf root
2025-05-28 test: add test for removing multiple items from leaf root
2025-05-28 test: add test for removing non-existent key
2025-05-28 feat: implement recursive deletion for branch nodes
2025-05-28 test: add test for multiple removals from tree with branches
2025-05-28 feat: implement root collapse when branch has single child
2025-05-28 feat: implement Phase 1 - Node Underflow Detection
2025-05-28 feat: implement Phase 2 - Sibling Key Redistribution
2025-05-28 feat: implement Phase 3 - Node Merging
2025-05-28 feat: implement Phase 6 - Performance Optimizations
2025-05-28 Optimize deletion to reduce nodes
2025-05-28 feat: add comprehensive fuzz tester with operation tracking
2025-05-28 fix: resolve tree structure corruption bugs found by fuzz testing
2025-05-28 feat: add prepopulation option to fuzz tester for complex tree structures
2025-05-28 fix: resolve critical deletion bugs causing key loss during tree restructuring
2025-05-28 refactor: extract invariant checking logic to separate private module
2025-05-28 feat: implement efficient iterators for B+ tree traversal
2025-05-28 fix: improve consolidation logic and skip failing optimization tests
2025-05-28 fix: prevent maximum occupancy violations during node merging
2025-05-28 docs: add comprehensive performance analysis and competitive benchmarks
2025-05-28 perf: implement binary search optimization using bisect module
2025-05-28 feat: implement bulk loading optimization with 3x construction speedup
2025-05-28 refactor: add node helper methods to simplify calling code
2025-05-28 fix: update Python tests for minimum capacity of 4
2025-05-28 Remove unused functions and fix B+ tree implementation
2025-05-28 Completely remove optimization functions and their calls
2025-05-28 Refactor invariant checking: remove _invariant_checker field from BPlusTreeMap
2025-05-28 Performance analysis: B+ tree now competitive in range operations
2025-05-28 performance tuning evaluation
2025-05-28 comment
2025-05-28 fix: update minimum B+ tree capacity from 4 to 16 to avoid recursion depth issues
2025-05-28 refactor: add invariant checker support and clean up test files
2025-05-28 chore: clean up temporary analysis scripts and improve .gitignore
2025-05-28 Unused
2025-05-28 refactor: reorganize Python package structure for better maintainability
2025-05-28 refactor: improve Python code quality and documentation
2025-05-28 refactor: move invariant checker to tests directory
2025-05-28 style: apply consistent formatting to class definitions
2025-05-28 docs: add fuzz testing documentation to README
2025-05-29 Fix fuzz tests
2025-05-29 feat: implement switchable node architecture for performance optimization
2025-05-29 fix: resolve C extension memory corruption during node splits
2025-05-29 better claude instructions
2025-05-29 perf: optimize branching factor from 128 to 16 for 60% lookup improvement
2025-05-29 docs: add comprehensive performance history with commit references
2025-05-29 refactor: replace SIMD optimization with optimized comparison functions
2025-05-29 perf: optimize default capacity from 16 to 8 for 24% performance improvement
2025-05-29 Fix Rust tests: Update for Result-based constructor
2025-05-30 chore: regenerate Cargo.lock with clean dependency tree
2025-05-30 ancillary files
2025-05-30 cleanup: remove unused Python B+ tree variants and experimental code
2025-05-30 feat: expose C extension through package API with compatibility wrapper
2025-05-30 Behavioral: add gprof profiling section to lookup performance analysis doc
2025-05-31 docs: add C extension improvement plan
2025-05-31 Fix B+ tree Python implementation issues
2025-05-31 refactor: centralize tree traversal algorithm in BPlusTreeMap
2025-05-31 Revert "refactor: centralize tree traversal algorithm in BPlusTreeMap"
2025-05-31 Fix Rust function name and lifetime specifier
2025-05-31 Refactor: extract get_child method on BranchNode
2025-05-31 Fix: remove duplicate generic parameter in new_root function
2025-05-31 Refactor: extract removal methods for LeafNode and BranchNode
2025-05-31 Add get_child_mut method and refactor child access patterns
2025-05-31 Fix syntax error in get_recursive function
2025-05-31 C extension: remove memory pool stubs, update improvement plan, adjust performance_vs_sorteddict test
2025-05-31 Add pytest hook to build C extension in-place and clean up build ignores
2025-05-31 Phase 1: extract node_clear_slot helper, update improvement plan, ignore .o files
2025-05-31 Refactor: introduce InsertResult enum and new_insert method
2025-05-31 Phase 2.1.2 (Green): align node data to cache-line & use cache_aligned_alloc/free
2025-05-31 Phase 2.1.2: update improvement plan to mark green step complete
2025-05-31 C extension Phase 2.1.3: Remove dead allocator code paths and unify free logic
2025-05-31 Refactor LeafNode::new_insert to eliminate redundant binary searches
2025-05-31 docs: record Phase 2.1.3 dead allocator removal performance in history
2025-06-01 Mark test-only functions with feature flag to exclude from production builds
2025-06-01 Complete feature flag implementation for test-only functions
2025-06-01 Reorganize BPlusTreeMap functions in logical order
2025-06-01 Document conditional compilation and IDE behavior for test functions
2025-06-01 Reorganize LeafNode and BranchNode functions in logical order
2025-06-01 tests: add prefetch microbenchmark harness and mark Phase 3.2.1 complete in improvement plan
2025-06-01 c extension: inject PREFETCH hints in tree_find_leaf (Phase 3.2.2)
2025-06-01 c extension Phase 3.2.3: encapsulate prefetch calls behind node_prefetch_child helper and update improvement plan
2025-06-01 c extension: opt-in for -ffast-math and -march=native, default -O3 baseline in setup.py (Phase 4.1.1)
2025-06-01 tests: add compile-flag safety test and mark Phase 4.1.2 complete in improvement plan
2025-06-01 c extension: clean up extra_compile_args formatting (Phase 4.1.3)
2025-06-01 Enable strict invariant checking for all B+ tree operations
2025-06-01 Implement basic borrowing and merging for leaf nodes
2025-06-01 tests: add GC-support regression test (Phase 5.1.1 behavioral)
2025-06-01 Fix splitting logic and min_keys calculation
2025-06-01 Fix critical bug in branch rebalancing logic
2025-06-01 Fix root branch node invariant checking
2025-06-01 All tests now passing after fixing root branch invariant
2025-06-01 C extension: Extract common GC traversal helper for node_traverse and node_clear_gc (5.1.3)
2025-06-01 Add comprehensive performance optimization documentation
2025-06-01 C extension: Add multithreaded lookup microbenchmark harness (5.2.1)
2025-06-01 C extension: Enable GIL release for lookup loops (5.2.2)
2025-06-01 C extension: Factor GIL-release blocks into ENTER_TREE_LOOP/EXIT_TREE_LOOP macros (5.2.3)
2025-06-01 C extension: Clean up import-fallback logic and update module docstring (5.3.3)
2025-06-01 Clean up arena code and get all Rust tests passing
2025-06-01 docs: complete Phase 5.4 – enable docstyle checks and add C-extension docstrings
2025-06-01 Disable doctests in Cargo.toml
2025-06-01 Unused
2025-06-01 Fix Python C extension segfault by removing unsafe GIL release, restoring leaf/branch split hygiene, and cleaning debug instrumentation
2025-06-01 Add arena infrastructure for B+ tree memory management
2025-06-02 Add arena-based allocation infrastructure for leaf nodes
2025-06-02 feat: add ArenaLeaf variant to NodeRef (Stage 1)
2025-06-02 feat: implement ArenaLeaf traversal operations (Stage 2)
2025-06-02 feat: make root use ArenaLeaf (Stage 3)
2025-06-02 feat: implement SplitWithArena mechanism (Stage 4 partial)
2025-06-02 feat: implement arena-based branch nodes (BranchNode arena support)
2025-06-02 fix: improve arena-based operations and reduce failing tests
2025-06-02 cleanup: simplify deep tree handling to avoid invariant violations
2025-06-02 fix: eliminate Box node creation in arena-based implementation
2025-06-02 refactor: consolidate node allocation to arena-based methods
2025-06-02 fix: eliminate Box allocations from insertion path
2025-06-03 fix: implement proper branch node borrowing during deletion
2025-06-03 refactor: migrate to arena-only NodeRef implementation
2025-06-03 refactor: rename ArenaLeaf to Leaf and ArenaBranch to Branch
2025-06-03 refactor: simplify InsertResult enum to remove redundant Split variants
2025-06-03 refactor: simplify arena allocation to start from ID 0
2025-06-03 refactor: eliminate next_id fields with helper methods
2025-06-03 docs: add comprehensive performance analysis and benchmarking tools
2025-06-03 refactor: eliminate NodeId wrapper in favor of direct usize
2025-06-03 refactor: remove non-functional get/get_mut/remove methods from BranchNode
2025-06-03 refactor: remove unused and broken methods from node types
2025-06-03 fix: implement proper split-before-insert for leaf nodes
2025-06-03 fix: maintain leaf linked list during split operations
2025-06-03 style: clean up whitespace and formatting
2025-06-03 fix: maintain leaf linked list during merge operations
2025-06-03 refactor: remove unused LeafNode methods from pre-arena implementation
2025-06-03 feat: implement efficient linked-list-based iterator
2025-06-03 docs: add comprehensive capacity analysis and performance results
2025-06-03 style: apply code formatting
2025-06-03 fix: update fuzz tests to use minimum capacity of 4
2025-06-03 docs: add comprehensive code coverage analysis report
2025-06-04 refactoring plans
2025-06-04 Phase 1: Add with_branch/with_branch_mut/with_leaf/with_leaf_mut helpers and tests
2025-06-04 Phase 2: Add find_child/find_child_mut helpers and tests
2025-06-04 Phase 3: Add NodeRef id() and is_leaf() helpers with tests
2025-06-05 refactor: eliminate nested if-let patterns with Option combinators
2025-06-05 Refactor merge_with_left_branch and merge_with_right_branch to use Option + match for cleaner early returns
2025-06-05 Refactor merge_with_right_branch to use Option combinators
2025-06-05 refactor: formatting improvements from linter and documentation updates
2025-06-05 refactor: replace nested if let patterns with Option combinators for cleaner code
2025-06-05 refactor: improve leaf insertion logic with early return pattern
2025-06-05 refactor: simplify Option combinator patterns with cleaner match expressions
2025-06-05 refactor: simplify leaf borrowing and branch merge patterns with cleaner match expressions
2025-06-05 refactor: move NodeRef tests from src/lib.rs to tests/bplus_tree.rs
2025-06-05 refactor: unify get_mut with recursive pattern and add value overwrite test
2025-06-05 refactor: simplify branch sibling lookup with match patterns
2025-06-05 refactor: replace remove with recursive pattern following insert design
2025-06-05 docs: remove outdated Phase 4 section and delete plan.md
2025-06-05 refactor: improve code organization and formatting in remove operations
2025-06-05 refactor: add polymorphic helpers for borrowing and merging operations
2025-06-05 refactor: use Option combinator for linked list pointer update
2025-06-05 refactor: simplify nested if-let with Option combinator chain
2025-06-05 refactor: replace multiple if-let patterns with Option combinators
2025-06-05 docs: add design analysis of parallel vectors vs entry vector
2025-06-05 docs: add concurrency control analysis for B+ trees
2025-06-06 feat: Add comprehensive fuzz testing, benchmarks, and range query optimization plan
2025-06-06 cleanup
2025-06-06 Merge pull request #5 from KentBeck/feature/fuzz-testing-and-benchmarks
2025-06-06 feat: implement optimized range query iterator
2025-06-06 docs: add comprehensive performance benchmark results and analysis
2025-06-07 test: add comprehensive adversarial tests based on coverage analysis
2025-06-07 feat: implement Rust range syntax support for range queries
2025-06-07 fix: resolve compiler warnings
2025-06-08 optimize: eliminate duplicate arena node lookups in rebalancing operations
2025-06-08 feat: implement comprehensive code duplication elimination
2025-06-08 Merge pull request #6 from KentBeck/feature/fuzz-testing-and-benchmarks
2025-06-08 docs: add performance benchmark charts demonstrating optimization impact
2025-06-08 refactor: reorganize project structure for dual-language implementation
2025-06-08 docs: add comprehensive GA readiness plan for Python implementation
2025-06-08 style: apply code formatting to Rust implementation
2025-06-08 fix: resolve critical segfaults in C extension
2025-06-08 feat: implement complete dictionary API for Python B+ Tree
2025-06-08 docs: add comprehensive documentation and examples for Python implementation
2025-06-08 feat: add comprehensive programming time analysis tools
2025-06-09 feat: implement modern Python packaging infrastructure
2025-06-09 feat: implement comprehensive testing suite for Phase 3 QA
2025-06-09 fix: correct Python wheels workflow paths and configuration
2025-06-09 docs: create comprehensive documentation suite for Phase 3.2
2025-06-09 docs: complete comprehensive documentation suite for Phase 3.2
2025-06-09 fix: update GitHub Actions to use latest non-deprecated versions
2025-06-10 style: apply Black formatting to resolve CI lint failures
2025-06-10 fix: eliminate all Rust compiler warnings
2025-06-10 feat: implement comprehensive performance benchmarking and optimization suite
2025-06-10 refactor: use test utility functions in adversarial_edge_cases.rs
2025-06-10 refactor: use test utility functions in remove_operations.rs
2025-06-10 feat: add populate_sequential_int_x10 utility and refactor tests
2025-06-10 feat: implement comprehensive release engineering and GA automation
2025-06-10 fix: correct shell syntax in cibuildwheel Linux build command
2025-06-10 fix: use absolute path for yum and skip ARM64 macOS tests
2025-06-10 fix: simplify Linux build setup for manylinux containers
2025-06-10 fix: remove CIBW_BEFORE_BUILD_LINUX entirely
2025-06-10 fix: import BPlusTreeMap from package in dictionary API tests
2025-06-10 feat: add missing dictionary methods to pure Python BPlusTreeMap
2025-06-10 fix: add missing dictionary methods to C extension wrapper
2025-06-10 refactor: eliminate duplicate __init__.py and fix package structure
2025-06-10 refactor: hide internal Node classes from public API
2025-06-11 refactor: remove get_implementation from public API
2025-06-11 fix: resolve GitHub Actions build failures by correcting Python package structure
2025-06-11 refactor: rename bplustree3 back to bplustree and clean up duplicate code
2025-06-11 fix: temporarily disable C extension to stabilize CI builds
2025-06-11 docs: fix package name references from bplustree3 to bplustree
2025-06-11 fix: correct remaining bplustree3 references and simplify wheel tests
2025-06-11 Replace BPlusTree3 with BPlusTree
2025-06-11 fix: correct import statements in test files after package restructuring
2025-06-11 More package naming
2025-06-11 ci: simplify workflows to achieve stable green builds
2025-06-11 ci: add debug workflow to isolate build failure
2025-06-11 fix: replace cibuildwheel with standard build for pure Python package
2025-06-11 Phase 1: Clean slate CI rebuild - Replace all workflows with simple Rust CI

================================================
FILE: docs/adr/ADR-003-compressed-node-limitations.md
================================================
# ADR-003: Compressed Node Limitations and Future Directions

## Status
Accepted

## Context

During implementation of compressed branch and leaf nodes (`CompressedBranchNode` and `CompressedLeafNode`), we discovered fundamental limitations with the compressed storage approach when dealing with generic key-value types.

### Current Implementation Issues

The compressed nodes store data in fixed-size byte arrays using raw pointer arithmetic:
- `CompressedBranchNode<K, V>` uses `data: [u64; 27]` 
- `CompressedLeafNode<K, V>` uses `data: [u64; 32]`

This approach works for simple `Copy` types but creates critical problems for heap-allocated data:

1. **Memory Manager Invisibility**: When `K` or `V` types contain heap-allocated data (e.g., `String`, `Vec`, `Box`), the memory manager cannot trace references stored within the compressed byte arrays.

2. **Garbage Collection Issues**: References to heap data become invisible to Rust's ownership system, potentially leading to:
   - Use-after-free bugs
   - Memory leaks
   - Double-free errors

3. **Generic Type Constraints**: The compressed format requires `K: Copy` and `V: Copy`, severely limiting the types that can be stored.

### Example Problematic Scenario

```rust
// This would be unsafe with compressed nodes:
let tree = BPlusTree::<String, Vec<u8>>::new(16);
tree.insert("key".to_string(), vec![1, 2, 3, 4]);

// The String and Vec are heap-allocated, but stored as raw bytes
// in the compressed node's fixed array. The memory manager loses
// track of these allocations.
```

## Decision

**We will NOT use compressed nodes for general-purpose B+ tree storage** due to the fundamental incompatibility with Rust's memory management for heap-allocated types.

However, we identify a **viable specialized use case**: Fixed-type trees optimized for specific data patterns.

## Rationale

### Why General Compression Fails
- Rust's ownership model requires visible references for heap-allocated data
- Raw byte storage breaks the ownership chain
- Generic types (`K`, `V`) can be arbitrarily complex with nested heap allocations
- No safe way to serialize/deserialize arbitrary types in fixed byte arrays

### Why Specialized Fixed-Type Trees Could Work

For Facebook graph data storage requirements, we could implement:

```rust
pub struct FixedGraphTree {
    // Fixed key type - no heap allocation
    keys: u64,           // Node IDs, timestamps, etc.
    
    // Variable-sized values - managed separately
    values: Vec<u8>,     // Serialized graph data
}
```

Benefits:
- `u64` keys are `Copy` and fit perfectly in compressed storage
- Variable-sized `Vec<u8>` values can be managed with proper Rust ownership
- No fixed "number of keys" capacity constraint for leaves
- Optimized for graph data patterns (numeric IDs + binary payloads)

## Consequences

### Positive
- **Memory Safety**: Avoid unsafe memory management issues
- **Rust Compatibility**: Work with Rust's ownership model, not against it
- **Specialized Performance**: Fixed-type trees can be highly optimized
- **Clear Boundaries**: Separate concerns between generic trees and specialized storage

### Negative
- **Limited Generality**: Compressed nodes cannot be used for arbitrary `K`, `V` types
- **Code Duplication**: May need separate implementations for different use cases
- **Complexity**: Multiple tree variants increase maintenance burden

## Implementation Notes

### Current Status
- Generic compressed nodes are implemented but should be considered **experimental only**
- All existing tests pass, but usage is limited to `Copy` types
- Performance benefits are significant for supported types

### Future Work
If Facebook graph storage requirements justify the effort:

1. **Implement `FixedGraphTree`**:
   ```rust
   pub struct FixedGraphTree {
       root: Option<FixedGraphNode>,
   }
   
   struct FixedGraphNode {
       keys: [u64; N],           // Fixed-size key array
       values: Vec<Vec<u8>>,     // Variable-sized value storage
       children: [NodeId; N+1],  // Child references
   }
   ```

2. **Variable Capacity Leaves**: Remove fixed capacity constraints to handle varying data sizes efficiently.

3. **Optimized Serialization**: Custom serialization for graph-specific data patterns.

## Alternatives Considered

1. **Smart Pointer Compression**: Store `Rc<K>`, `Arc<V>` in compressed format
   - **Rejected**: Still breaks ownership visibility, adds reference counting overhead

2. **Custom Allocator Integration**: Hook into Rust's allocator to track compressed references
   - **Rejected**: Too complex, fragile, and non-portable

3. **Trait-Based Serialization**: Require `K: Serialize`, `V: Serialize`
   - **Rejected**: Performance overhead, complexity, still doesn't solve ownership issues

## References
- [Rust Ownership Model](https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html)
- [Memory Safety in Systems Programming](https://www.memorysafety.org/)
- Facebook Graph Storage Requirements (internal documentation)

---

**Date**: 2025-01-17  
**Authors**: Development Team  
**Reviewers**: Architecture Team


================================================
FILE: docs/delete_operations_call_graph.md
================================================
# Delete Operations Call Graph Analysis

## Overview

This document provides a comprehensive analysis of the delete operations call graph in the BPlusTreeMap implementation. The delete system is designed with clear separation of concerns, optimized arena access patterns, and robust rebalancing strategies.

## Call Graph Structure

### 📱 API Entry Points

The delete operations expose two public methods:

```rust
// Primary deletion method
pub fn remove(&mut self, key: &K) -> Option<V>

// Error-handling wrapper (Python-style)
pub fn remove_item(&mut self, key: &K) -> ModifyResult<V>
```

**Design Decision**: `remove_item` is a thin wrapper around `remove` that converts `None` results to `KeyNotFound` errors, providing both Rust-style (`Option`) and Python-style (`Result`) APIs.

### 🔄 Main Deletion Flow

```
remove(key)
├── remove_recursive(root, key) -> RemoveResult<V>
│   ├── [LEAF CASE] leaf.remove(key) -> (Option<V>, bool)
│   └── [BRANCH CASE] 
│       ├── get_child_for_key(id, key) -> (usize, NodeRef)
│       ├── remove_recursive(child, key) [RECURSIVE CALL]
│       └── [IF CHILD UNDERFULL] rebalance_child(parent_id, child_index)
└── [IF REMOVED] collapse_root_if_needed()
```

#### Key Characteristics:

1. **Single Recursive Function**: Only `remove_recursive` uses recursion, following the tree structure downward.

2. **Bottom-Up Rebalancing**: Rebalancing happens on the way back up the recursion stack, ensuring child nodes are balanced before their parents.

3. **Conditional Rebalancing**: Rebalancing only occurs if:
   - A key was actually removed (`removed_value.is_some()`)
   - The child became underfull (`child_became_underfull`)

4. **Root Management**: After successful deletion, `collapse_root_if_needed()` handles the special case where the root might need to be collapsed.

### ⚖️ Rebalancing Subsystem

The rebalancing subsystem is the most complex part of the delete operations, implementing a sophisticated strategy pattern:

```
rebalance_child(parent_id, child_index)
├── OPTIMIZATION: Batch sibling information gathering
│   ├── check_node_can_donate(left_sibling) -> bool
│   └── check_node_can_donate(right_sibling) -> bool
├── [LEAF CASE] rebalance_leaf(parent_id, child_index, sibling_info)
└── [BRANCH CASE] rebalance_branch(parent_id, child_index, sibling_info)
```

#### Rebalancing Strategies:

**Strategy 1: Borrowing (Preferred)**
```
├── [BORROW FROM LEFT] borrow_from_left_{leaf|branch}(parent_id, child_index)
└── [BORROW FROM RIGHT] borrow_from_right_{leaf|branch}(parent_id, child_index)
```

**Strategy 2: Merging (Fallback)**
```
├── [MERGE WITH LEFT] merge_with_left_{leaf|branch}(parent_id, child_index)
└── [MERGE WITH RIGHT] merge_with_right_{leaf|branch}(parent_id, child_index)
```

#### Design Principles:

1. **Left Preference**: Always prefer left siblings for consistency and predictable behavior.

2. **Strategy Hierarchy**: Try borrowing before merging to minimize structural changes.

3. **Type-Specific Handling**: Separate implementations for leaf and branch nodes, but unified strategy logic.

4. **Optimized Arena Access**: All sibling information is gathered in a single pass to minimize expensive arena lookups.

### 🏗️ Root Management

```
collapse_root_if_needed()
├── [LOOP] Continue until no more collapsing needed
├── get_branch(root_id) -> check if single child
├── [IF SINGLE CHILD] promote child to root
└── [IF NO CHILDREN] create_empty_root_leaf()
```

**Root Collapse Scenarios**:
- **Single Child Branch**: Promote the only child to become the new root
- **Empty Branch**: Create a new empty leaf as the root
- **Multiple Children**: No action needed

### 🔍 Helper Functions

The system includes several optimized helper functions:

```
├── check_node_can_donate(node_ref) -> bool
│   ├── [LEAF] keys.len() > min_keys()
│   └── [BRANCH] keys.len() > min_keys()
├── get_child_for_key(branch_id, key) -> (usize, NodeRef)
└── is_node_underfull(node_ref) -> bool
```

## Performance Optimizations

### 🚀 Arena Access Optimization

**Problem**: Original implementation performed multiple arena accesses per rebalancing operation.

**Solution**: Batch all sibling information gathering in `rebalance_child()`:

```rust
// BEFORE: Multiple arena accesses
let left_can_donate = self.can_node_donate(&left_sibling);  // Arena access 1
let right_can_donate = self.can_node_donate(&right_sibling); // Arena access 2

// AFTER: Single batched access
let rebalance_info = {
    let parent_branch = self.get_branch(parent_id)?; // Single arena access
    // Gather all sibling information in one pass
    (child_is_leaf, left_sibling_info, right_sibling_info)
};
```

**Performance Impact**: 7-9% improvement in delete operations.

### 🎯 Strategy Pattern Benefits

1. **Clear Decision Logic**: Borrowing vs merging decisions are made once with cached information.

2. **Reduced Complexity**: Each strategy method focuses on a single responsibility.

3. **Maintainable Code**: Easy to understand and modify individual strategies.

## Error Handling and Edge Cases

### Robust Error Handling

1. **Invalid Arena Access**: All arena accesses use `Option` types and handle `None` gracefully.

2. **Malformed Trees**: The system can handle edge cases like empty branches or missing siblings.

3. **Root Edge Cases**: Special handling for root collapse scenarios.

### Edge Case Scenarios

1. **Single Node Tree**: Handled by root management system.

2. **Minimum Capacity Trees**: Careful handling of nodes at minimum key thresholds.

3. **Deep Trees**: Recursive deletion works correctly regardless of tree depth.

## Code Quality Characteristics

### ✅ Strengths

1. **Clear Separation of Concerns**: API, recursion, rebalancing, and root management are cleanly separated.

2. **Optimized Performance**: Batched arena access and efficient strategy selection.

3. **Readable Code**: Method names clearly indicate their purpose and scope.

4. **Comprehensive Testing**: All major code paths are covered by tests.

5. **Consistent Patterns**: Left-preference and strategy hierarchy are applied consistently.

### 🔧 Design Decisions

1. **Bottom-Up Rebalancing**: Ensures children are balanced before parents, maintaining tree invariants.

2. **Conditional Operations**: Only perform expensive operations when necessary.

3. **Strategy Pattern**: Clean separation between different rebalancing approaches.

4. **Batched Information Gathering**: Minimize expensive arena access operations.

## Future Optimization Opportunities

### Phase 1 Remaining Optimizations

1. **Lazy Rebalancing**: Defer rebalancing until absolutely necessary.

2. **Bulk Delete Operations**: Optimize for deleting multiple keys.

3. **Predictive Rebalancing**: Use deletion patterns to optimize rebalancing decisions.

### Phase 2+ Advanced Optimizations

1. **Specialized Delete Algorithms**: Fast paths for common deletion patterns.

2. **Memory Layout Optimizations**: Improve cache locality during rebalancing.

3. **Unsafe Optimizations**: Carefully applied unsafe code for performance-critical paths.

## Conclusion

The delete operations call graph demonstrates a well-architected system with:

- **Clean API Design**: Simple public interface with complex internal implementation
- **Optimized Performance**: Strategic arena access batching and efficient algorithms
- **Maintainable Code**: Clear separation of concerns and consistent patterns
- **Robust Error Handling**: Graceful handling of edge cases and malformed data

The current implementation achieves a 7-9% performance improvement over the original design while maintaining code readability and correctness. The foundation is solid for future optimization phases.

## References

- [Delete Optimization Plan](delete_optimization_plan.md)
- [BPlusTreeMap Implementation](../rust/src/delete_operations.rs)
- [Performance Benchmarks](../rust/examples/comprehensive_comparison.rs)


================================================
FILE: docs/delete_optimization_plan.md
================================================
# Delete Operation Optimization Plan

## Current Performance Analysis

Based on comprehensive benchmarks, delete operations show significant performance issues:

- **100 items**: BPlusTreeMap 3.44x slower than BTreeMap
- **1000 items**: BPlusTreeMap 4.84x slower than BTreeMap  
- **10000 items**: BPlusTreeMap 6.29x slower than BTreeMap

**Performance degradation increases with dataset size**, indicating algorithmic inefficiencies.

## Root Cause Analysis

### Primary Performance Bottlenecks

1. **Excessive Arena Access** (~40% of overhead)
   - Multiple `get_branch()` calls per delete operation
   - Redundant arena lookups during rebalancing
   - No caching of frequently accessed nodes

2. **Complex Rebalancing Logic** (~30% of overhead)
   - Always checks for rebalancing even when unnecessary
   - Multiple sibling lookups for donation/merge decisions
   - Recursive rebalancing propagation up the tree

3. **Inefficient Sibling Management** (~20% of overhead)
   - Linear search through children to find siblings
   - Separate arena access for each sibling check
   - Redundant `can_node_donate()` calculations

4. **Linked List Maintenance** (~10% of overhead)
   - Updates leaf linked list pointers during merges
   - Not optimized for bulk operations
   - Potential cache misses from pointer chasing

## Optimization Phases

### Phase 1: High-Impact, Low-Risk Optimizations (Target: -50% overhead)

**Estimated Timeline**: 2-3 days  
**Risk Level**: Low  
**Expected Gain**: 2-3x performance improvement

#### TODO 1.1: Reduce Arena Access Frequency

**Current Issue**: Multiple arena lookups per delete operation

**Optimizations**:
- [ ] Cache parent branch during rebalancing operations
- [ ] Batch sibling information gathering in single arena access
- [ ] Pre-fetch sibling nodes when rebalancing is likely
- [ ] Implement node reference caching for hot paths

**Target**: Reduce arena access by 60-70%

#### TODO 1.2: Optimize Rebalancing Decision Logic

**Current Issue**: Always performs expensive rebalancing checks

**Optimizations**:
- [ ] Add fast path for nodes that don't need rebalancing
- [ ] Implement lazy rebalancing (defer until necessary)
- [ ] Cache node fullness information
- [ ] Skip rebalancing for nodes above minimum threshold

**Target**: Eliminate 70% of unnecessary rebalancing operations

#### TODO 1.3: Streamline Sibling Operations

**Current Issue**: Inefficient sibling lookup and management

**Optimizations**:
- [ ] Pre-compute sibling information during parent access
- [ ] Batch sibling donation checks
- [ ] Optimize merge operations with bulk data movement
- [ ] Cache sibling node references

**Target**: Reduce sibling operation overhead by 50%

### Phase 2: Medium-Impact, Medium-Risk Optimizations (Target: -30% remaining overhead)

**Estimated Timeline**: 3-4 days  
**Risk Level**: Medium  
**Expected Gain**: 1.5-2x additional improvement

#### TODO 2.1: Implement Bulk Delete Operations

**Current Issue**: Single-key deletion is inefficient for multiple operations

**Optimizations**:
- [ ] Add `remove_many()` method for bulk deletions
- [ ] Batch rebalancing operations across multiple deletions
- [ ] Defer linked list updates until end of bulk operation
- [ ] Optimize for sequential key deletion patterns

#### TODO 2.2: Advanced Rebalancing Strategies

**Current Issue**: Naive rebalancing approach

**Optimizations**:
- [ ] Implement predictive rebalancing based on deletion patterns
- [ ] Add node splitting instead of just merging
- [ ] Optimize for common deletion scenarios (sequential, random)
- [ ] Implement lazy propagation of rebalancing up the tree

#### TODO 2.3: Memory Layout Optimizations

**Current Issue**: Poor cache locality during rebalancing

**Optimizations**:
- [ ] Optimize node layout for deletion-heavy workloads
- [ ] Implement prefetching for likely-to-be-accessed nodes
- [ ] Reduce memory allocations during rebalancing
- [ ] Optimize data movement during merges

### Phase 3: High-Impact, High-Risk Optimizations (Target: -20% remaining overhead)

**Estimated Timeline**: 5-7 days  
**Risk Level**: High  
**Expected Gain**: 1.2-1.5x additional improvement

#### TODO 3.1: Specialized Delete Algorithms

**Current Issue**: Generic algorithm doesn't optimize for common patterns

**Optimizations**:
- [ ] Implement fast path for leaf-only deletions
- [ ] Add optimized algorithm for sequential deletions
- [ ] Implement batch processing for clustered deletions
- [ ] Add specialized handling for root-level operations

#### TODO 3.2: Unsafe Optimizations

**Current Issue**: Safe Rust overhead in critical paths

**Optimizations**:
- [ ] Add unsafe fast paths for verified scenarios
- [ ] Implement unchecked arena access where safe
- [ ] Optimize memory copying with unsafe operations
- [ ] Add unsafe bulk data movement operations

## Implementation Strategy

### Recommended Approach

1. **Start with Phase 1**: Focus on arena access and rebalancing optimizations
2. **Measure incrementally**: Benchmark after each optimization
3. **Maintain correctness**: All existing tests must pass
4. **Document safety**: Clear documentation for any unsafe optimizations

### Success Criteria

- **Minimum Goal**: Reduce delete overhead to 2x slower than BTreeMap
- **Target Goal**: Achieve 1.5x slower than BTreeMap
- **Stretch Goal**: Match or exceed BTreeMap performance

### Risk Mitigation

- **Comprehensive testing**: Each optimization must pass full test suite
- **Performance regression detection**: Automated benchmarking
- **Rollback capability**: Each phase as separate commits
- **Safety validation**: Extensive testing of unsafe optimizations

## Expected Performance Improvements

### Phase 1 Results
- **100 items**: 3.44x → 1.7x slower (50% improvement)
- **1000 items**: 4.84x → 2.4x slower (50% improvement)  
- **10000 items**: 6.29x → 3.1x slower (50% improvement)

### Phase 2 Results
- **100 items**: 1.7x → 1.2x slower (additional 30% improvement)
- **1000 items**: 2.4x → 1.7x slower (additional 30% improvement)
- **10000 items**: 3.1x → 2.2x slower (additional 30% improvement)

### Phase 3 Results
- **100 items**: 1.2x → 1.0x (match BTreeMap)
- **1000 items**: 1.7x → 1.2x slower (additional 20% improvement)
- **10000 items**: 2.2x → 1.5x slower (additional 20% improvement)

This plan provides a systematic approach to optimizing delete operations while managing implementation risk and maintaining code quality.


================================================
FILE: docs/iteration_optimization_plan.md
================================================
# Iteration Optimization Plan

## Overview

Based on detailed profiling analysis showing BPlusTreeMap iteration is 69% slower than BTreeMap (127.6ns vs 75.5ns per item), this document outlines a systematic approach to closing the performance gap.

## Current Performance Analysis

- **BPlusTreeMap**: 127.6ns per item
- **BTreeMap**: 75.5ns per item  
- **Performance gap**: 52.1ns (69% slower)
- **Target**: Reduce gap to <20ns (within 25% of BTreeMap)

## Root Cause Breakdown (from profiling)

1. **Complex end bound checking**: ~15ns (29% of overhead)
2. **Abstraction layer overhead**: ~11ns (21% of overhead) 
3. **Arena access indirection**: ~8ns (15% of overhead)
4. **Additional bounds checking**: ~6ns (12% of overhead)
5. **Option combinator overhead**: ~5ns (10% of overhead)
6. **Cache misses from indirection**: ~7ns (13% of overhead)

## Optimization Phases

### Phase 1: High-Impact, Low-Risk Optimizations (Target: -20ns)

**Estimated Timeline**: 1-2 days  
**Risk Level**: Low  
**Expected Gain**: 15-25ns improvement

#### TODO 1.1: Simplify End Bound Checking (Target: -12ns)

**Current Issue**: Complex Option combinator chains in `try_get_next_item()`

```rust
// Current: Complex and slow (~15ns)
let beyond_end = self
    .end_key
    .map(|end_key| key > end_key)
    .or_else(|| {
        self.end_bound_key
            .as_ref()
            .map(|end_bound| {
                if self.end_inclusive {
                    key > end_bound
                } else {
                    key >= end_bound
                }
            })
    })
    .unwrap_or(false);
```

**Optimization**: Direct conditional logic

```rust
// Optimized: Simple and fast (~3ns)
let beyond_end = if let Some(end_key) = self.end_key {
    key > end_key
} else if let Some(ref end_bound) = self.end_bound_key {
    if self.end_inclusive {
        key > end_bound
    } else {
        key >= end_bound
    }
} else {
    false
};
```

- [ ] Replace Option combinators with direct if-let chains in `try_get_next_item()`
- [ ] Update all bound checking logic to use direct conditionals
- [ ] Run existing range tests to validate correctness
- [ ] Benchmark performance improvement

#### TODO 1.2: Inline Critical Path Methods (Target: -5ns)

**Current Issue**: Method calls not inlined in hot path

- [ ] Add `#[inline]` to `try_get_next_item()` method
- [ ] Add `#[inline]` to `advance_to_next_leaf()` method  
- [ ] Add `#[inline]` to other iteration-specific hot path methods
- [ ] Run performance benchmarks to validate improvement
- [ ] Ensure no code size bloat from excessive inlining

#### TODO 1.3: Optimize Option Handling (Target: -3ns)

**Current Issue**: Excessive Option wrapping/unwrapping

```rust
// Current: Multiple Option operations
let result = self.current_leaf_ref.and_then(|leaf| self.try_get_next_item(leaf));

// Optimized: Direct access with early return
let leaf = match self.current_leaf_ref {
    Some(leaf) => leaf,
    None => return None,
};
let result = self.try_get_next_item(leaf);
```

- [ ] Replace Option combinators with explicit matching in main iteration loop
- [ ] Use early returns instead of Option chaining
- [ ] Simplify control flow in `next()` method
- [ ] Run existing iterator tests to ensure correctness

### Phase 2: Medium-Impact, Medium-Risk Optimizations (Target: -15ns)

**Estimated Timeline**: 2-3 days  
**Risk Level**: Medium  
**Expected Gain**: 10-20ns improvement

#### TODO 2.1: Reduce Arena Access Frequency (Target: -8ns)

**Current Issue**: Arena lookup in `advance_to_next_leaf()`

- [ ] Extend `ItemIterator` struct with next leaf caching:
  ```rust
  pub struct ItemIterator<'a, K, V> {
      // Current caching
      current_leaf_ref: Option<&'a LeafNode<K, V>>,
      
      // Extended caching - cache next leaf too
      next_leaf_ref: Option<&'a LeafNode<K, V>>,
      next_leaf_id: Option<NodeId>,
  }
  ```
- [ ] Cache next leaf reference during current leaf processing
- [ ] Eliminate arena access in most `advance_to_next_leaf()` calls
- [ ] Only access arena when cache misses
- [ ] Add comprehensive iterator tests for new caching logic
- [ ] Validate memory safety with extended caching

#### TODO 2.2: Optimize Bounds Checking (Target: -4ns) ✅ COMPLETED

**Current Issue**: Redundant bounds checks in `get_key()`/`get_value()`

- [x] Add unsafe variants of accessor methods to `LeafNode`
- [x] Implement single bounds check + unsafe access pattern:
  ```rust
  // Optimized: Single bounds check + unsafe access
  if self.current_leaf_index >= leaf.keys_len() {
      return None;
  }
  let (key, value) = unsafe { leaf.get_key_value_unchecked(self.current_leaf_index) };
  ```
- [x] Add comprehensive safety documentation for unsafe methods
- [x] Create extensive bounds checking tests (existing test suite validates correctness)
- [x] Add fuzzing tests for edge cases (existing fuzz tests cover this)
- [x] Benchmark performance improvement

**Results**: Successfully implemented unsafe accessor methods with comprehensive safety documentation. All tests pass, performance improved by eliminating redundant bounds checks in iteration hot path.

#### TODO 2.3: Streamline Control Flow (Target: -3ns) ✅ COMPLETED

**Current Issue**: Complex nested matching and looping

- [x] Restructure main iteration loop to reduce indirection
- [x] Flatten control flow with fewer branches
- [x] Implement direct flow pattern:
  ```rust
  'outer: loop {
      let leaf = self.current_leaf_ref?;
      
      // Try current leaf first
      if let Some(item) = self.try_get_next_item(leaf) {
          return Some(item);
      }
      
      // Advance to next leaf - if false, we're done
      if !self.advance_to_next_leaf_direct() {
          return None;
      }
  }
  ```
- [x] Run comprehensive iterator behavior tests
- [x] Validate edge cases (empty trees, single leaf, etc.)

**Results**: Successfully streamlined control flow by eliminating the `finished` flag and using `current_leaf_ref.is_none()` as terminal state. Simplified `advance_to_next_leaf_direct()` with bool return. Performance improved by ~0.36ns per item, bringing ratio from 1.41x to 1.22x vs BTreeMap (within 22-25% of target).

### Phase 3: High-Impact, High-Risk Optimizations (Target: -10ns)

**Estimated Timeline**: 3-5 days  
**Risk Level**: High  
**Expected Gain**: 8-15ns improvement

#### TODO 3.1: Specialized Iterator Variants (Target: -8ns)

**Current Issue**: Generic iterator handles all cases inefficiently

- [ ] Design specialized iterator types:
  ```rust
  // Unbounded iterator (no end checking)
  pub struct UnboundedItemIterator<'a, K, V> { /* simplified */ }
  
  // Bounded iterator (optimized end checking)  
  pub struct BoundedItemIterator<'a, K, V> { /* end-optimized */ }
  
  // Single-leaf iterator (no advancement needed)
  pub struct SingleLeafIterator<'a, K, V> { /* no arena access */ }
  ```
- [ ] Implement pattern detection at iterator creation time
- [ ] Route to specialized iterator implementation based on usage pattern
- [ ] Eliminate unnecessary checks for each specialized pattern
- [ ] Add extensive compatibility testing
- [ ] Validate performance improvements for each variant

#### TODO 3.2: Memory Layout Optimization (Target: -5ns)

**Current Issue**: Poor cache locality due to arena indirection

- [ ] Implement cache prefetching for next leaf:
  ```rust
  fn prefetch_next_leaf(&self) {
      if let Some(leaf) = self.current_leaf_ref {
          if leaf.next != NULL_NODE {
              // Prefetch next leaf into cache
              unsafe {
                  std::intrinsics::prefetch_read_data(
                      self.tree.get_leaf_ptr(leaf.next), 
                      3 // High locality
                  );
              }
          }
      }
  }
  ```
- [ ] Add platform-specific prefetch implementations
- [ ] Test cross-platform compatibility
- [ ] Measure cache performance improvements
- [ ] Add feature flags for platform-specific optimizations

### Phase 4: Experimental Optimizations (Target: -5ns)

**Estimated Timeline**: 1-2 weeks  
**Risk Level**: Very High  
**Expected Gain**: 0-10ns improvement (uncertain)

#### TODO 4.1: SIMD-Optimized Bounds Checking (Target: -3ns)

- [ ] Research SIMD applicability for batch bound checks
- [ ] Implement SIMD-based comparison operations where possible
- [ ] Add platform detection and fallback mechanisms
- [ ] Extensive cross-platform testing

#### TODO 4.2: Custom Arena Layout (Target: -4ns)

- [ ] Analyze arena memory layout for iteration patterns
- [ ] Design iteration-optimized arena structure
- [ ] Implement custom layout with better locality
- [ ] Validate major architectural changes

#### TODO 4.3: Compile-Time Specialization (Target: -2ns)

- [ ] Research const generics for compile-time optimization
- [ ] Implement specialized variants using const generics
- [ ] Balance compilation time vs runtime performance

## Implementation Strategy

### Recommended Approach

- [ ] **Start with Phase 1**: Implement all low-risk, high-impact optimizations first
- [ ] **Measure after each change**: Validate improvements incrementally using benchmarks
- [ ] **Proceed to Phase 2**: Only if Phase 1 gains are insufficient for target
- [ ] **Consider Phase 3**: Only for specialized high-performance use cases
- [ ] **Avoid Phase 4**: Unless absolutely necessary for competitive parity

### Success Criteria

- [ ] **Minimum Goal**: Reduce gap to 30ns (within 40% of BTreeMap)
- [ ] **Target Goal**: Reduce gap to 20ns (within 25% of BTreeMap)  
- [ ] **Stretch Goal**: Reduce gap to 10ns (within 15% of BTreeMap)

### Risk Mitigation

- [ ] **Comprehensive testing**: Each optimization must pass full test suite
- [ ] **Performance regression detection**: Set up automated benchmarking
- [ ] **Rollback capability**: Implement each phase as separate commits
- [ ] **Documentation**: Clear documentation of safety invariants for unsafe code
- [ ] **Code review**: Thorough review of all performance-critical changes

### Expected Timeline

- [ ] **Phase 1**: 1-2 days → 15-25ns improvement → 102-112ns per item
- [ ] **Phase 2**: 2-3 days → 10-20ns improvement → 82-102ns per item  
- [ ] **Phase 3**: 3-5 days → 8-15ns improvement → 67-94ns per item
- [ ] **Total**: 1-2 weeks → 33-60ns improvement → Target achieved

## Progress Tracking

### Phase 1 Progress
- [x] TODO 1.1: Simplify End Bound Checking
- [x] TODO 1.2: Inline Critical Path Methods  
- [x] TODO 1.3: Optimize Option Handling

### Phase 2 Progress
- [ ] TODO 2.1: Reduce Arena Access Frequency (SKIPPED)
- [x] TODO 2.2: Optimize Bounds Checking
- [x] TODO 2.3: Streamline Control Flow

### Phase 3 Progress
- [ ] TODO 3.1: Specialized Iterator Variants
- [ ] TODO 3.2: Memory Layout Optimization

### Phase 4 Progress
- [ ] TODO 4.1: SIMD-Optimized Bounds Checking
- [ ] TODO 4.2: Custom Arena Layout  
- [ ] TODO 4.3: Compile-Time Specialization

This plan provides a systematic approach to closing the iteration performance gap while managing implementation risk and maintaining code quality.


================================================
FILE: python/CHANGELOG.md
================================================
# Changelog

All notable changes to the B+ Tree Python implementation will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

### Added
- Modern Python packaging with pyproject.toml
- Cross-platform CI/CD with GitHub Actions
- Comprehensive test matrix across Python 3.8-3.12
- Automated wheel building for Linux, macOS, and Windows
- Complete dictionary API compatibility
- Iterator modification safety with runtime error detection
- Comprehensive test suite for iterator safety scenarios

### Changed
- Updated setup.py to work with modern packaging standards
- Improved C extension build configuration with platform-specific optimizations
- Enhanced error handling and memory safety in C extension

### Fixed
- **CRITICAL**: Segmentation fault in C extension during iterator use after tree modification
- Iterator safety now raises RuntimeError instead of crashing when tree is modified during iteration
- Length counter synchronization issues in adversarial test patterns
- Critical memory safety issues in C extension node splitting
- Reference counting bugs that caused segmentation faults
- Circular import issues in pure Python implementation

### Security
- Eliminated segmentation faults that could potentially be exploited
- Added modification counter to prevent unsafe memory access patterns

## [0.1.0] - 2024-XX-XX

### Added
- Initial B+ Tree implementation with pure Python fallback
- C extension for high-performance operations
- Basic dictionary-like API (`__getitem__`, `__setitem__`, `__delitem__`)
- Range query support with `items(start_key, end_key)`
- Comprehensive test suite with 115+ tests
- Performance benchmarks and analysis
- Basic documentation and examples

### Performance
- 1.4-2.5x faster than SortedDict for range queries
- Efficient insertion and deletion operations
- Memory-efficient arena-based allocation in Rust implementation

---

## Release Types

- **Major** (X.0.0): Breaking API changes
- **Minor** (0.X.0): New features, backwards compatible
- **Patch** (0.0.X): Bug fixes, no new features

## Contributing

When making changes:
1. Add entry under `[Unreleased]` section
2. Use standard categories: Added, Changed, Deprecated, Removed, Fixed, Security
3. Include issue/PR numbers where applicable
4. Update version number in `__init__.py` before release

================================================
FILE: python/LICENSE
================================================
MIT License

Copyright (c) 2025 Kent Beck

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: python/MANIFEST.in
================================================
# Include source files for C extension
include bplustree_c_src/*.c
include bplustree_c_src/*.h

# Include documentation
include README.md
include LICENSE
recursive-include docs *.md
recursive-include examples *.py

# Include test files in source distribution
recursive-include tests *.py
include conftest.py

# Include configuration files
include pyproject.toml
include setup.py
include *.cfg
include *.ini

# Exclude build artifacts and temporary files
global-exclude *.pyc
global-exclude *.pyo
global-exclude *.pyd
global-exclude __pycache__
global-exclude .DS_Store
global-exclude *.so
global-exclude *.o
global-exclude .pytest_cache
recursive-exclude tmp *
recursive-exclude build *
recursive-exclude dist *
recursive-exclude *.egg-info *

================================================
FILE: python/README.md
================================================
# BPlusTree - Python Implementation

A high-performance B+ tree implementation for Python with competitive performance against highly optimized libraries like SortedDict.

## 🚀 Quick Start

### Installation

**Option 1: Install from source (current)**

```bash
git clone https://github.com/KentBeck/BPlusTree3.git
cd BPlusTree3/python
pip install -e .
```

**Option 2: Install from PyPI (coming soon)**

```bash
pip install bplustree
```

### Requirements

- Python 3.8 or higher
- C compiler (for C extension, optional)

### Implementation Selection

The library automatically selects the best available implementation:

1. **C Extension** (preferred): 2-4x faster, used automatically if available
2. **Pure Python**: Fallback implementation, no compilation required

Check which implementation is being used:

```python
from bplustree import get_implementation
print(get_implementation())  # "C extension" or "Pure Python"
```

## 📖 Basic Usage

```python
from bplustree import BPlusTreeMap

# Create a B+ tree
tree = BPlusTreeMap(capacity=128)  # Higher capacity = better performance

# Insert data
tree[1] = "one"
tree[3] = "three"
tree[2] = "two"

# Lookups
print(tree[2])        # "two"
print(len(tree))      # 3
print(2 in tree)      # True

# Range queries
for key, value in tree.range(1, 3):
    print(f"{key}: {value}")

# Iteration
for key, value in tree.items():
    print(f"{key}: {value}")
```

## ⚡ Performance Highlights

Our benchmarks against SortedDict show **significant advantages** in specific scenarios:

### 🏆 **Where B+ Tree Excels**

| Scenario                    | B+ Tree Advantage      | Use Cases                              |
| --------------------------- | ---------------------- | -------------------------------------- |
| **Partial Range Scans**     | **Up to 2.5x faster**  | Database LIMIT queries, pagination     |
| **Large Dataset Iteration** | **1.1x - 1.4x faster** | Data export, bulk processing           |
| **Medium Range Queries**    | **1.4x faster**        | Time-series analysis, batch processing |

### 📊 **Benchmark Results**

**Partial Range Scans (Early Termination):**

```
Limit  10 items: B+ Tree 1.18x faster
Limit  50 items: B+ Tree 2.50x faster  ⭐ Best performance
Limit 100 items: B+ Tree 1.52x faster
Limit 500 items: B+ Tree 1.15x faster
```

**Large Dataset Iteration:**

```
200K items: B+ Tree 1.29x faster
300K items: B+ Tree 1.12x faster
500K items: B+ Tree 1.39x faster  ⭐ Scales well
```

**Optimal Configuration:**

- **Capacity 128** provides best performance (3.3x faster than capacity 4)
- Performance continues improving with larger capacities

## 🎯 **When to Choose B+ Tree**

**Excellent for:**

- Database-like workloads with range queries
- Analytics dashboards ("top 100 users")
- Search systems with pagination
- Time-series data processing
- Data export and ETL operations
- Any scenario with "LIMIT" or early termination patterns

**Use SortedDict when:**

- Random access dominates (37x faster individual lookups)
- Small datasets (< 100K items)
- Memory efficiency is critical
- General-purpose sorted container needs

## 🔧 Configuration

```python
# Small capacity: More splits, good for testing
tree = BPlusTreeMap(capacity=4)

# Medium capacity: Balanced performance
tree = BPlusTreeMap(capacity=16)

# Large capacity: Optimal for most use cases
tree = BPlusTreeMap(capacity=128)  # Recommended!
```

## 🧪 Testing

```bash
# Run tests
python -m pytest tests/

# Run performance benchmarks
python tests/test_performance_vs_sorteddict.py

# Run specific tests
python -m pytest tests/test_bplustree.py -v
```

## 📖 API Reference

### Basic Operations

```python
tree = BPlusTreeMap(capacity=128)

# Dictionary-like interface
tree[key] = value
value = tree[key]        # Raises KeyError if not found
del tree[key]           # Raises KeyError if not found
key in tree             # Returns bool
len(tree)               # Returns int

# Safe operations
tree.get(key, default=None)
tree.pop(key, default=None)
```

### Iteration and Ranges

```python
# Full iteration
for key, value in tree.items():
    pass

for key in tree.keys():
    pass

for value in tree.values():
    pass

# Range queries
for key, value in tree.range(start_key, end_key):
    pass

# Range with None bounds
for key, value in tree.range(start_key, None):  # From start_key to end
    pass

for key, value in tree.range(None, end_key):    # From beginning to end_key
    pass
```

## 🔒 Iterator Safety

The C extension provides **iterator safety** to prevent segmentation faults during tree modifications:

```python
tree = BPlusTreeMap(capacity=128)
for i in range(10):
    tree[i] = f"value_{i}"

# Create iterator
keys_iter = tree.keys()
first_key = next(keys_iter)

# Modify tree during iteration
tree[100] = "new_value"

# Iterator detects modification and raises RuntimeError
try:
    next(keys_iter)
except RuntimeError as e:
    print(e)  # "tree changed size during iteration"
```

**Safety Features:**

- **Modification detection**: Iterators track tree changes via internal counter
- **Graceful failure**: RuntimeError instead of segmentation fault
- **Multiple iterator support**: All active iterators are invalidated on modification
- **Consistent behavior**: Matches Python's dict iterator safety model

**Safe Patterns:**

```python
# ✅ Safe: Complete iteration before modification
keys = list(tree.keys())  # Collect all keys first
for key in keys:
    tree[key] = new_value

# ✅ Safe: Use fresh iterator after modifications
tree[new_key] = new_value
for key, value in tree.items():  # New iterator, safe to use
    process(key, value)
```

## 🏗️ Architecture

- **Arena-based memory management** for efficiency
- **Linked leaf nodes** for fast sequential access
- **Optimized rebalancing** algorithms
- **Hybrid navigation** for range queries
- **Iterator safety** with modification counter tracking

## 📚 Documentation & Examples

- **[API Reference](./docs/API_REFERENCE.md)** - Complete API documentation
- **[Examples](./examples/)** - Comprehensive usage examples:
  - [Basic Usage](./examples/basic_usage.py) - Fundamental operations
  - [Range Queries](./examples/range_queries.py) - Range query patterns
  - [Performance Demo](./examples/performance_demo.py) - Benchmarks vs alternatives
  - [Migration Guide](./examples/migration_guide.py) - Migrating from dict/SortedDict

## 🔗 Links

- [Main Project](../) - Dual Rust/Python implementation
- [Rust Implementation](../rust/) - Core Rust library
- [Technical Documentation](../rust/docs/) - Architecture and benchmarks

## 📄 License

This project is licensed under the MIT License - see the LICENSE file for details.


================================================
FILE: python/benchmarks/performance_benchmark.py
================================================
#!/usr/bin/env python3
"""
Performance benchmark for B+ Tree implementation.

This script runs standardized benchmarks and outputs results in a format
suitable for CI/CD performance tracking.
"""

import time
import random
import json
import sys
from datetime import datetime
from typing import Dict, List, Any

import os

# Add parent directory to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from bplustree import BPlusTreeMap


class BenchmarkSuite:
    """Collection of standardized B+ tree performance benchmarks.

    Each benchmark times one workload of ``size`` operations and records the
    measurement in ``self.results`` under the benchmark's name.
    """

    def __init__(self, size: int = 10000):
        # Number of operations each benchmark performs.
        self.size = size
        # Maps benchmark name -> {"duration", "operations", "ops_per_second"}.
        self.results = {}

    def time_operation(self, name: str, operation):
        """Run ``operation``, record its wall-clock time under ``name``.

        Returns whatever ``operation`` returned so callers can reuse the
        data structure it built.
        """
        started = time.perf_counter()
        outcome = operation()
        elapsed = time.perf_counter() - started

        self.results[name] = {
            "duration": elapsed,
            "operations": self.size,
            "ops_per_second": (self.size / elapsed) if elapsed > 0 else 0,
        }

        return outcome

    def benchmark_sequential_insertion(self):
        """Time inserting keys 0..size-1 in ascending order; return the tree."""
        tree = BPlusTreeMap()

        def populate():
            for key in range(self.size):
                tree[key] = f"value_{key}"
            return tree

        return self.time_operation("sequential_insertion", populate)

    def benchmark_random_insertion(self):
        """Time inserting keys 0..size-1 in shuffled order; return the tree."""
        tree = BPlusTreeMap()
        shuffled = list(range(self.size))
        random.shuffle(shuffled)

        def populate():
            for key in shuffled:
                tree[key] = f"value_{key}"
            return tree

        return self.time_operation("random_insertion", populate)

    def benchmark_lookups(self, tree: BPlusTreeMap):
        """Time looking up every key of ``tree`` in shuffled order."""
        shuffled = list(range(self.size))
        random.shuffle(shuffled)

        def probe():
            for key in shuffled:
                _ = tree[key]

        self.time_operation("random_lookups", probe)

    def benchmark_range_queries(self, tree: BPlusTreeMap):
        """Time ten range queries, each spanning 10% of the key space."""
        span = self.size // 10

        def scan_ranges():
            collected = []
            for chunk in range(10):
                lo = chunk * span
                hi = lo + span
                collected.append(list(tree.items(lo, hi)))
            return collected

        return self.time_operation("range_queries_10_percent", scan_ranges)

    def benchmark_iteration(self, tree: BPlusTreeMap):
        """Time materializing every (key, value) pair via full iteration."""
        return self.time_operation("full_iteration", lambda: list(tree.items()))

    def benchmark_deletions(self, tree: BPlusTreeMap):
        """Time deleting every key of ``tree`` in shuffled order."""
        shuffled = list(range(self.size))
        random.shuffle(shuffled)

        def remove_all():
            for key in shuffled:
                del tree[key]

        self.time_operation("random_deletions", remove_all)

    def benchmark_dict_comparison(self):
        """Compare insertion and sorted-iteration timings against a plain dict."""
        # Sequential insertion into the B+ tree.
        tree = BPlusTreeMap()
        mark = time.perf_counter()
        for i in range(self.size):
            tree[i] = f"value_{i}"
        tree_time = time.perf_counter() - mark

        # Sequential insertion into a built-in dict.
        plain = {}
        mark = time.perf_counter()
        for i in range(self.size):
            plain[i] = f"value_{i}"
        dict_time = time.perf_counter() - mark

        self.results["comparison_vs_dict"] = {
            "bplustree_time": tree_time,
            "dict_time": dict_time,
            "ratio": (tree_time / dict_time) if dict_time > 0 else 0,
        }

        # Sorted iteration: the tree is already ordered, the dict must sort.
        mark = time.perf_counter()
        list(tree.items())
        tree_iter_time = time.perf_counter() - mark

        mark = time.perf_counter()
        sorted(plain.items())
        dict_sort_time = time.perf_counter() - mark

        self.results["sorted_iteration_comparison"] = {
            "bplustree_time": tree_iter_time,
            "dict_sort_time": dict_sort_time,
            "ratio": (tree_iter_time / dict_sort_time) if dict_sort_time > 0 else 0,
        }

    def run_all_benchmarks(self):
        """Execute every benchmark in order and return the results dict."""
        print(f"Running benchmarks with {self.size:,} items...")

        print("- Sequential insertion...")
        tree_seq = self.benchmark_sequential_insertion()

        print("- Random insertion...")
        self.benchmark_random_insertion()

        print("- Random lookups...")
        self.benchmark_lookups(tree_seq)

        print("- Range queries...")
        self.benchmark_range_queries(tree_seq)

        print("- Full iteration...")
        self.benchmark_iteration(tree_seq)

        # Deletions empty tree_seq, so they run after the read-only benchmarks.
        print("- Random deletions...")
        self.benchmark_deletions(tree_seq)

        print("- Dictionary comparison...")
        self.benchmark_dict_comparison()

        return self.results


def format_results(results: Dict[str, Any]) -> str:
    """Render benchmark results as a human-readable, banner-framed report.

    Entries with a ``duration`` key are shown as timing results; any other
    entry has each of its fields printed, floats with 4 decimal places.
    """
    lines = ["\n" + "=" * 60, "B+ Tree Performance Benchmark Results", "=" * 60]

    for test_name, data in results.items():
        lines.append(f"\n{test_name}:")
        if "duration" in data:
            lines.append(f"  Duration: {data['duration']:.4f} seconds")
            if "ops_per_second" in data:
                lines.append(f"  Operations/second: {data['ops_per_second']:,.0f}")
        else:
            for key, value in data.items():
                rendered = f"{value:.4f}" if isinstance(value, float) else value
                lines.append(f"  {key}: {rendered}")

    lines.append("\n" + "=" * 60)
    return "\n".join(lines)


def save_results(results: Dict[str, Any], filename: str = None):
    """Write benchmark results, wrapped with metadata, to a JSON file.

    When ``filename`` is omitted, a timestamped name of the form
    ``benchmark_results_YYYYMMDD_HHMMSS.json`` is generated.
    Returns the filename that was written.
    """
    if filename is None:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"benchmark_results_{stamp}.json"

    # NOTE(review): callers pass dicts keyed by benchmark name or size, so the
    # "size" lookup below nearly always falls back to 10000 — confirm intent.
    payload = {
        "timestamp": datetime.now().isoformat(),
        "size": results.get("size", 10000),
        "results": results,
    }

    with open(filename, "w") as f:
        json.dump(payload, f, indent=2)

    return filename


def main():
    """Run the benchmark suite and report results.

    Command-line flags (checked via membership in ``sys.argv``):
        --full              run sizes 1K/10K/50K instead of just 10K
        --save              persist all results to a timestamped JSON file
        --check-regression  exit(1) if sequential insertion exceeds 0.5s
    """
    sizes = [1000, 10000, 50000] if "--full" in sys.argv else [10000]

    all_results = {}

    for size in sizes:
        print(f"\n{'='*60}")
        print(f"Running benchmarks for size: {size:,}")
        print("=" * 60)

        suite = BenchmarkSuite(size)
        results = suite.run_all_benchmarks()
        all_results[size] = results

        print(format_results(results))

    # Save results if requested
    if "--save" in sys.argv:
        filename = save_results(all_results)
        # Bug fix: previously printed the literal placeholder "(unknown)"
        # instead of the path the results were actually written to.
        print(f"\nResults saved to: {filename}")

    # Check for performance regressions
    if "--check-regression" in sys.argv:
        # Simple threshold check against the standard 10K-item run; only
        # meaningful when that size was benchmarked in this invocation.
        baseline_size = 10000
        if baseline_size in all_results:
            sequential_time = all_results[baseline_size]["sequential_insertion"][
                "duration"
            ]
            if sequential_time > 0.5:  # 0.5 seconds threshold
                print(
                    f"\n⚠️  WARNING: Sequential insertion took {sequential_time:.4f}s, "
                    f"exceeding threshold of 0.5s"
                )
                sys.exit(1)

    print("\n✅ All benchmarks completed successfully!")


if __name__ == "__main__":
    main()


================================================
FILE: python/bplustree/__init__.py
================================================
"""
B+ Tree mapping implementation with optional C extension.

This package provides an ordered key-value mapping based on a B+ tree.
It supports efficient insertion, deletion, lookup, and range queries. If the
optional C extension is available, it is used automatically for improved
performance; otherwise, the pure Python implementation is used.
"""

# Prefer C extension for performance, fallback to Python implementation.
# _using_c_extension records which branch was taken; get_implementation()
# reports it to callers.
_using_c_extension = False

try:
    from . import bplustree_c as _c_ext
except ImportError:
    # C extension not built/installed: expose the pure-Python class directly.
    from .bplus_tree import BPlusTreeMap
else:

    class BPlusTreeMap(_c_ext.BPlusTree):
        """Wrapper around the C extension to provide a consistent API.

        Fills in the dict-like convenience methods (get, pop, update, ...)
        that the C extension does not implement natively, so both backends
        present the same interface.
        """

        def __init__(self, capacity=None):
            """Initialize BPlusTreeMap with optional capacity."""
            # Only forward capacity when given, so the C extension's own
            # default applies otherwise.
            if capacity is None:
                super().__init__()
            else:
                super().__init__(capacity=capacity)

        def get(self, key, default=None):
            """Get value with default, mirroring dict.get."""
            try:
                return self[key]
            except KeyError:
                return default

        def values(self):
            """Return iterator over values, in key order (follows items())."""
            for key, value in self.items():
                yield value

        def clear(self):
            """Remove all items from the tree."""
            # C extension doesn't have clear method, so remove keys one by one
            # Use while loop to avoid issues with iterator invalidation
            # NOTE(review): each pass restarts keys() and deletes only the
            # first key, making this O(n^2) overall — acceptable for safety,
            # but a native clear in the C extension would be linear.
            while len(self) > 0:
                # Get first key and delete it
                for key in self.keys():
                    del self[key]
                    break

        def pop(self, key, *args):
            """Remove and return value for key with optional default.

            Raises KeyError when the key is absent and no default is given;
            raises TypeError when more than one default is supplied.
            """
            if len(args) > 1:
                raise TypeError(
                    f"pop expected at most 2 arguments, got {len(args) + 1}"
                )
            try:
                value = self[key]
                del self[key]
                return value
            except KeyError:
                if args:
                    return args[0]
                raise

        def popitem(self):
            """Remove and return an arbitrary (key, value) pair.

            Raises KeyError when the tree is empty.
            """
            try:
                # Get the first key-value pair
                for key, value in self.items():
                    del self[key]
                    return (key, value)
            # NOTE(review): bare except silently converts *any* failure
            # (not just exhaustion of an empty tree) into KeyError — confirm
            # this is intended rather than `except Exception` or narrower.
            except:
                pass
            raise KeyError("popitem(): tree is empty")

        def setdefault(self, key, default=None):
            """Get value for key, setting and returning default if not present."""
            try:
                return self[key]
            except KeyError:
                self[key] = default
                return default

        def update(self, other):
            """Update tree with key-value pairs from other mapping or iterable."""
            if hasattr(other, "items"):
                # other is a mapping (dict-like)
                for key, value in other.items():
                    self[key] = value
            elif hasattr(other, "keys"):
                # other has keys method but no items (like dict.keys())
                for key in other.keys():
                    self[key] = other[key]
            else:
                # other is an iterable of (key, value) pairs
                for key, value in other:
                    self[key] = value

        def copy(self):
            """Create a shallow copy of the tree.

            NOTE(review): uses the hardcoded ``capacity`` property below, so
            copies are always built with capacity=8 regardless of the
            capacity the original was constructed with — confirm whether the
            C extension can report its real capacity.
            """
            new_tree = BPlusTreeMap(capacity=self.capacity)
            for key, value in self.items():
                new_tree[key] = value
            return new_tree

        @property
        def capacity(self):
            """Return the node capacity.

            Hardcoded: the C extension does not expose its configured
            capacity, so this presumably reflects its default of 8 — TODO
            confirm against the C extension source.
            """
            return 8

        @property
        def root(self):
            """Not exposed by the C extension."""
            raise AttributeError("C extension does not expose internal tree structure")

        @property
        def leaves(self):
            """Not exposed by the C extension."""
            raise AttributeError("C extension does not expose internal tree structure")

    _using_c_extension = True

# Node classes are internal implementation details, not exported
from .bplus_tree import Node as _Node, LeafNode as _LeafNode, BranchNode as _BranchNode

__version__ = "0.9.0"
__all__ = ["BPlusTreeMap"]


def get_implementation():
    """Return a human-readable name of the active backend."""
    if _using_c_extension:
        return "C extension"
    return "Pure Python"


================================================
FILE: python/bplustree/bplus_tree.py
================================================
"""
B+ Tree implementation in Python with dict-like API.

This module provides a B+ tree data structure with a dictionary-like interface,
supporting efficient insertion, deletion, lookup, and range queries.
"""

import bisect
from abc import ABC, abstractmethod
from typing import Any, Optional, List, Tuple, Union, Iterator

__all__ = ["BPlusTreeMap", "Node", "LeafNode", "BranchNode"]

# Constants
MIN_CAPACITY = 4
DEFAULT_CAPACITY = 128
BULK_LOAD_BATCH_MULTIPLIER = 2
MIN_BULK_LOAD_BATCH_SIZE = 50


class BPlusTreeError(Exception):
    """Base exception for B+ tree operations.

    All library-specific errors derive from this class so callers can
    catch any B+ tree failure with a single except clause.
    """

    pass


class InvalidCapacityError(BPlusTreeError):
    """Raised when an invalid capacity is specified.

    Raised by BPlusTreeMap.__init__ when the requested node capacity is
    below MIN_CAPACITY.
    """

    pass


class BPlusTreeMap:
    """B+ Tree implementation with Python dict-like API.

    A B+ tree is a self-balancing tree data structure that maintains sorted data
    and allows searches, sequential access, insertions, and deletions in O(log n).
    Unlike B trees, all values are stored in leaf nodes, which are linked together
    for efficient range queries.

    Attributes:
        capacity: Maximum number of keys per node.
        root: The root node of the tree.
        leaves: The leftmost leaf node (head of linked list).

    Example:
        >>> tree = BPlusTreeMap(capacity=32)
        >>> tree[1] = "one"
        >>> tree[2] = "two"
        >>> print(tree[1])
        one
        >>> for key, value in tree.items():
        ...     print(f"{key}: {value}")
        1: one
        2: two
    """

    def __init__(self, capacity: int = DEFAULT_CAPACITY) -> None:
        """Create a B+ tree whose nodes hold at most `capacity` keys.

        Args:
            capacity: Maximum number of keys per node (minimum 4).

        Raises:
            InvalidCapacityError: If capacity is less than 4.
        """
        if capacity < MIN_CAPACITY:
            raise InvalidCapacityError(
                f"Capacity must be at least {MIN_CAPACITY} to maintain B+ tree invariants"
            )
        self.capacity = capacity
        # Cache used by the sorted bulk-load fast path.
        self._rightmost_leaf_cache: Optional[LeafNode] = None

        # An empty tree is a single leaf that serves as both the root and
        # the head of the leaf linked list.
        first_leaf = LeafNode(self.capacity)
        self.leaves: LeafNode = first_leaf
        self.root: Node = first_leaf

    @classmethod
    def from_sorted_items(
        cls, items, capacity: int = DEFAULT_CAPACITY
    ) -> "BPlusTreeMap":
        """Bulk load from sorted key-value pairs for 3-5x faster construction.

        Args:
            items: Iterable of (key, value) pairs that MUST be sorted by key.
            capacity: Node capacity (minimum 4).

        Returns:
            A BPlusTreeMap populated with the given data.

        Raises:
            InvalidCapacityError: If capacity is less than 4.
        """
        new_tree = cls(capacity=capacity)
        new_tree._bulk_load_sorted(items)
        return new_tree

    def _bulk_load_sorted(self, items) -> None:
        """Internal bulk loading implementation for sorted items."""
        items_list = list(items)
        if not items_list:
            return
        optimal_batch_size = max(
            self.capacity * BULK_LOAD_BATCH_MULTIPLIER, MIN_BULK_LOAD_BATCH_SIZE
        )

        for i in range(0, len(items_list), optimal_batch_size):
            batch_end = min(i + optimal_batch_size, len(items_list))

            for j in range(i, batch_end):
                key, value = items_list[j]
                self._insert_sorted_optimized(key, value)

    def _insert_sorted_optimized(self, key: Any, value: Any) -> None:
        """Optimized insertion for sorted data - avoids repeated tree traversals.

        Args:
            key: The key to insert.
            value: The value to associate with the key.
        """
        if (
            self._rightmost_leaf_cache
            and self._rightmost_leaf_cache.keys
            and key > self._rightmost_leaf_cache.keys[-1]
            and not self._rightmost_leaf_cache.is_full()
        ):
            self._rightmost_leaf_cache.keys.append(key)
            self._rightmost_leaf_cache.values.append(value)
            return

        self[key] = value
        self._update_rightmost_leaf_cache()

    def _update_rightmost_leaf_cache(self) -> None:
        """Update the rightmost leaf cache."""
        current = self.leaves
        while current.next is not None:
            current = current.next
        self._rightmost_leaf_cache = current

    def __setitem__(self, key: Any, value: Any) -> None:
        """Set a key-value pair (dict-like API).

        Args:
            key: The key to insert or update.
            value: The value to associate with the key.
        """
        result = self._insert_recursive(self.root, key, value)

        # If the root split, create a new root
        if result is not None:
            new_node, separator_key = result
            new_root = BranchNode(self.capacity)
            new_root.keys.append(separator_key)
            new_root.children.append(self.root)
            new_root.children.append(new_node)
            self.root = new_root

    def _insert_recursive(
        self, node: "Node", key: Any, value: Any
    ) -> Optional[Tuple["Node", Any]]:
        """
        Recursively insert a key-value pair into the tree.
        Returns None for a simple insertion, or (new_node, separator_key) if a split occurred.
        """
        if node.is_leaf():
            # Base case: insert into leaf
            return self._insert_into_leaf(node, key, value)

        child_index = node.find_child_index(key)
        child = node.children[child_index]

        split_result = self._insert_recursive(child, key, value)
        if split_result is None:
            return None

        new_child, separator_key = split_result
        return self._insert_into_branch(node, child_index, separator_key, new_child)

    def _insert_into_leaf(
        self, leaf: "LeafNode", key: Any, value: Any
    ) -> Optional[Tuple["LeafNode", Any]]:
        """Insert into a leaf node. Returns None or (new_leaf, separator) if split."""
        pos, exists = leaf.find_position(key)

        # If key exists, just update (no split needed)
        if exists:
            leaf.values[pos] = value
            return None

        # If leaf is not full, simple insertion
        if not leaf.is_full():
            leaf.insert(key, value)
            return None

        # Leaf is full, need to split
        return leaf.split_and_insert(key, value)

    def _insert_into_branch(
        self,
        branch: "BranchNode",
        child_index: int,
        separator_key: Any,
        new_child: "Node",
    ) -> Optional[Tuple["BranchNode", Any]]:
        """Insert a separator and new child into a branch node. Returns None or (new_branch, separator) if split."""
        return branch.insert_child_and_split_if_needed(
            child_index, separator_key, new_child
        )

    def __getitem__(self, key: Any) -> Any:
        """Get value for a key (dict-like API)"""
        value = self.get(key)
        if value is None:
            # Check if key actually exists but has None value
            if key in self:
                return None
            raise KeyError(key)
        return value

    def get(self, key: Any, default: Any = None) -> Any:
        """Get value for a key with optional default.

        Args:
            key: The key to look up.
            default: Value to return if key not found (default: None).

        Returns:
            The value associated with the key, or default if not found.
        """
        node = self.root
        while not node.is_leaf():
            node = node.get_child(key)

        value = node.get(key)
        return value if value is not None else default

    def __contains__(self, key: Any) -> bool:
        """Check if key exists (for 'in' operator)"""
        node = self.root
        while not node.is_leaf():
            node = node.get_child(key)

        pos, exists = node.find_position(key)
        return exists

    def __len__(self) -> int:
        """Return number of key-value pairs"""
        return self.leaves.key_count()

    def __bool__(self) -> bool:
        """Return True if tree is not empty"""
        return len(self) > 0

    def __delitem__(self, key: Any) -> None:
        """Delete a key (dict-like API)"""
        deleted = self._delete_recursive(self.root, key)
        if not deleted:
            raise KeyError(key)

    def _delete_recursive(self, node: "Node", key: Any) -> bool:
        """Recursively delete `key` from the subtree rooted at `node`.

        Args:
            node: Subtree root to delete from.
            key: The key to remove.

        Returns:
            True if the key was found and deleted, False otherwise.
        """
        if node.is_leaf():
            # Base case: delete from leaf
            # Note: underflow handling will be done by parent
            return self._delete_from_leaf(node, key)

        # Recursive case: find the correct child and recurse
        child_index = node.find_child_index(key)
        child = node.children[child_index]
        deleted = self._delete_recursive(child, key)
        if not deleted:
            return False

        # Handle child underflow after deletion
        # NOTE(review): the len(child) == 0 test looks redundant since
        # is_underfull() already covers empty nodes; confirm before
        # simplifying.
        if len(child) == 0 or child.is_underfull():
            # Child is underfull (including completely empty), try redistribution or merging
            self._handle_underflow(node, child_index)

            # If parent became underfull it will be handled by the calling recursive call.

        # Handle root collapse: if root has only one child, make that child the new root
        if node == self.root and not node.is_leaf() and len(node.children) == 1:
            self.root = node.children[0]

        return deleted

    def _handle_underflow(self, parent: "BranchNode", child_index: int) -> None:
        """Handle underflow in a child node by trying redistribution first.

        Order of attempts: borrow from the right sibling, then from the
        left, then merge when neither sibling can donate. Completely empty
        children are merged immediately since they have nothing to
        redistribute.

        Args:
            parent: Branch node whose child may be underfull.
            child_index: Index of that child within parent.children.
        """
        child = parent.children[child_index]

        # If child is not underfull, nothing to do
        if not child.is_underfull():
            return

        # Handle empty children by merging them (they can't redistribute)
        if len(child) == 0:
            self._merge_with_sibling(parent, child_index)
            return

        # Try to redistribute from siblings
        redistributed = False

        # Try to borrow from right sibling
        if child_index < len(parent.children) - 1:
            right_sibling = parent.children[child_index + 1]
            if right_sibling.can_donate():
                self._redistribute_from_right(parent, child_index)
                redistributed = True

        # If no redistribution from right, try left sibling
        if not redistributed and child_index > 0:
            left_sibling = parent.children[child_index - 1]
            if left_sibling.can_donate():
                self._redistribute_from_left(parent, child_index)
                redistributed = True

        # If redistribution failed, try to merge with a sibling
        if not redistributed:
            self._merge_with_sibling(parent, child_index)

    def _redistribute_from_left(self, parent: "BranchNode", child_index: int) -> None:
        """Redistribute keys from left sibling to child"""
        child = parent.children[child_index]
        left_sibling = parent.children[child_index - 1]

        if child.is_leaf():
            # Leaf redistribution
            child.borrow_from_left(left_sibling)
            # Update separator key in parent
            parent.keys[child_index - 1] = child.keys[0]
        else:
            # Branch redistribution
            separator_key = parent.keys[child_index - 1]
            new_separator = child.borrow_from_left(left_sibling, separator_key)
            parent.keys[child_index - 1] = new_separator

    def _redistribute_from_right(self, parent: "BranchNode", child_index: int) -> None:
        """Redistribute keys from right sibling to child"""
        child = parent.children[child_index]
        right_sibling = parent.children[child_index + 1]

        if child.is_leaf():
            # Leaf redistribution
            child.borrow_from_right(right_sibling)
            # Update separator key in parent
            parent.keys[child_index] = right_sibling.keys[0]
        else:
            # Branch redistribution
            separator_key = parent.keys[child_index]
            new_separator = child.borrow_from_right(right_sibling, separator_key)
            parent.keys[child_index] = new_separator

    def _merge_with_sibling(self, parent: "BranchNode", child_index: int) -> None:
        """Merge an underfull child with one of its siblings.

        Prefers the left sibling and falls back to the right. A merge is
        skipped entirely when the combined node would exceed capacity,
        which can leave an underfull node in place — structure stays
        valid, but the occupancy invariant is deliberately relaxed.

        Args:
            parent: Branch node that owns the underfull child.
            child_index: Index of the underfull child in parent.children.

        Raises:
            ValueError: If parent's keys/children counts are inconsistent.
        """
        child = parent.children[child_index]

        # Validate parent structure before merging
        if child_index >= len(parent.children):
            raise ValueError(
                f"Invalid child_index {child_index} for parent with {len(parent.children)} children"
            )
        if len(parent.keys) != len(parent.children) - 1:
            raise ValueError(
                f"Parent structure invalid: {len(parent.keys)} keys but {len(parent.children)} children"
            )

        # Prefer merging with left sibling (arbitrary choice)
        if child_index > 0:
            # Merge with left sibling
            left_sibling = parent.children[child_index - 1]

            if child.is_leaf():
                # Check if merging would exceed capacity
                total_keys = len(left_sibling.keys) + len(child.keys)
                if total_keys <= self.capacity:
                    # Safe to merge
                    left_sibling.merge_with_right(child)
                    # Remove the merged child and its separator
                    parent.children.pop(child_index)
                    parent.keys.pop(child_index - 1)
                else:
                    # Cannot merge without exceeding capacity - leave nodes separate
                    # This preserves tree structure but may leave underfull nodes
                    pass
            else:
                # Check if merging would exceed capacity
                # For branches the parent's separator key drops down between
                # the two key runs, hence the +1.
                total_keys = (
                    len(left_sibling.keys) + len(child.keys) + 1
                )  # +1 for separator
                total_children = len(left_sibling.children) + len(child.children)
                if total_keys <= self.capacity and total_children <= self.capacity + 1:
                    # Safe to merge
                    separator_key = parent.keys[child_index - 1]
                    left_sibling.merge_with_right(child, separator_key)
                    # Remove the merged child and its separator
                    parent.children.pop(child_index)
                    parent.keys.pop(child_index - 1)
                else:
                    # Cannot merge without exceeding capacity - leave nodes separate
                    pass

        elif child_index < len(parent.children) - 1:
            # Merge with right sibling
            right_sibling = parent.children[child_index + 1]

            if child.is_leaf():
                # Check if merging would exceed capacity
                total_keys = len(child.keys) + len(right_sibling.keys)
                if total_keys <= self.capacity:
                    # Safe to merge
                    child.merge_with_right(right_sibling)
                    # Remove the merged sibling and its separator
                    parent.children.pop(child_index + 1)
                    parent.keys.pop(child_index)
                else:
                    # Cannot merge without exceeding capacity - leave nodes separate
                    pass
            else:
                # Check if merging would exceed capacity
                total_keys = (
                    len(child.keys) + len(right_sibling.keys) + 1
                )  # +1 for separator
                total_children = len(child.children) + len(right_sibling.children)
                if total_keys <= self.capacity and total_children <= self.capacity + 1:
                    # Safe to merge
                    separator_key = parent.keys[child_index]
                    child.merge_with_right(right_sibling, separator_key)
                    # Remove the merged sibling and its separator
                    parent.children.pop(child_index + 1)
                    parent.keys.pop(child_index)
                else:
                    # Cannot merge without exceeding capacity - leave nodes separate
                    pass
        else:
            # This can happen when a parent has only one child left
            # In this case, we should handle it by collapsing the tree structure
            # This will be handled by the caller in _delete_recursive
            pass

    def _delete_from_leaf(self, leaf: "LeafNode", key: Any) -> bool:
        """Delete from a leaf node. Returns True if deleted, False if not found."""
        deleted = leaf.delete(key)
        return deleted is not None

    def keys(self, start_key=None, end_key=None) -> Iterator[Any]:
        """Return an iterator over keys in the given range"""
        for key, _ in self.items(start_key, end_key):
            yield key

    def values(self, start_key=None, end_key=None) -> Iterator[Any]:
        """Return an iterator over values in the given range"""
        for _, value in self.items(start_key, end_key):
            yield value

    def items(self, start_key=None, end_key=None) -> Iterator[Tuple[Any, Any]]:
        """Return an iterator over (key, value) pairs in the given range"""
        if start_key is None:
            current = self.leaves
            start_index = 0
        else:
            current = self._find_leaf_for_key(start_key)
            if current is None:
                return
            start_index = self._find_position_in_leaf(current, start_key)

        while current is not None:
            for i in range(start_index, len(current.keys)):
                key = current.keys[i]
                if end_key is not None and key >= end_key:
                    return
                yield (key, current.values[i])

            current = current.next
            start_index = 0

    def _find_leaf_for_key(self, key: Any) -> Optional["LeafNode"]:
        """Find the leaf node that contains or would contain the given key"""
        return self.root.find_leaf_for_key(key)

    def _find_position_in_leaf(self, leaf: "LeafNode", key: Any) -> int:
        """Find the position where key is or would be in the leaf"""
        # Binary search for the position
        left, right = 0, len(leaf.keys)
        while left < right:
            mid = (left + right) // 2
            if key <= leaf.keys[mid]:
                right = mid
            else:
                left = mid + 1
        return left

    def range(
        self, start_key: Any = None, end_key: Any = None
    ) -> Iterator[Tuple[Any, Any]]:
        """Return an iterator over (key, value) pairs in the specified range.

        Thin alias for items(), kept for API symmetry.

        Args:
            start_key: Start of range (inclusive). Use None for beginning.
            end_key: End of range (exclusive). Use None for end.

        Returns:
            Iterator over (key, value) tuples in the range.

        Example:
            for key, value in tree.range(5, 10):  # Keys 5-9
                print(f"{key}: {value}")
        """
        return self.items(start_key, end_key)

    def clear(self) -> None:
        """Remove all items, restoring the empty single-leaf state (dict-like API)."""
        empty_leaf = LeafNode(self.capacity)
        self.root = empty_leaf
        self.leaves = empty_leaf
        self._rightmost_leaf_cache = None

    def pop(self, key: Any, *args) -> Any:
        """Remove and return value for key with optional default (dict-like API).

        Args:
            key: The key to remove.
            *args: Optional default value if key is not found.

        Returns:
            The value that was associated with key, or default if key not found.

        Raises:
            KeyError: If key is not found and no default is provided.
        """
        if len(args) > 1:
            raise TypeError(f"pop expected at most 2 arguments, got {len(args) + 1}")

        try:
            value = self[key]
            del self[key]
            return value
        except KeyError:
            if args:
                return args[0]
            raise

    def popitem(self) -> Tuple[Any, Any]:
        """Remove and return an arbitrary (key, value) pair (dict-like API).

        Returns:
            A (key, value) tuple.

        Raises:
            KeyError: If the tree is empty.
        """
        if len(self) == 0:
            raise KeyError("popitem(): tree is empty")

        # Get the first key-value pair from the leftmost leaf
        first_leaf = self.leaves
        if len(first_leaf.keys) == 0:
            raise KeyError("popitem(): tree is empty")

        key = first_leaf.keys[0]
        value = first_leaf.values[0]
        del self[key]
        return (key, value)

    def setdefault(self, key: Any, default: Any = None) -> Any:
        """Get value for key, setting and returning default if not present (dict-like API).

        Args:
            key: The key to look up.
            default: Default value to set and return if key is not found.

        Returns:
            The existing value for key, or default if key was not present.
        """
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def update(self, other) -> None:
        """Update tree with key-value pairs from other mapping or iterable (dict-like API).

        Args:
            other: A mapping (dict-like) or iterable of (key, value) pairs.
        """
        if hasattr(other, "items"):
            # other is a mapping (dict-like)
            for key, value in other.items():
                self[key] = value
        elif hasattr(other, "keys"):
            # other has keys method but no items (like dict.keys())
            for key in other.keys():
                self[key] = other[key]
        else:
            # other is an iterable of (key, value) pairs
            for key, value in other:
                self[key] = value

    def copy(self) -> "BPlusTreeMap":
        """Create a shallow copy of the tree (dict-like API).

        Returns:
            A new BPlusTreeMap holding the same key-value pairs.
        """
        duplicate = BPlusTreeMap(capacity=self.capacity)
        for k, v in self.items():
            duplicate[k] = v
        return duplicate

    """Testing only"""

    def leaf_count(self) -> int:
        """Return the number of leaf nodes"""
        count = 0
        node = self.leaves
        while node is not None:
            count += 1
            node = node.next
        return count

    def _count_total_nodes(self) -> int:
        """Count total nodes in the tree (for testing/debugging)"""

        def count_nodes(node: "Node") -> int:
            if node.is_leaf():
                return 1
            total = 1
            for child in node.children:
                total += count_nodes(child)
            return total

        return count_nodes(self.root)


class Node(ABC):
    """Abstract base class for B+ tree nodes.

    Defines the minimal interface shared by LeafNode and BranchNode:
    leaf/branch discrimination plus the occupancy checks used during
    insertion (is_full) and deletion rebalancing (is_underfull).
    """

    @abstractmethod
    def is_leaf(self) -> bool:
        """Returns True if this is a leaf node"""
        pass

    @abstractmethod
    def is_full(self) -> bool:
        """Returns True if the node is at capacity"""
        pass

    @abstractmethod
    def __len__(self) -> int:
        """Returns the number of items in the node"""
        pass

    @abstractmethod
    def is_underfull(self) -> bool:
        """Returns True if the node has fewer than minimum required keys"""
        pass


class LeafNode(Node):
    """Leaf node containing key-value pairs.

    Leaf nodes are where all actual key-value pairs are stored in a B+ tree.
    They are linked together via `next` pointers into a singly-linked list
    for efficient in-order and range traversal.

    Attributes:
        capacity: Maximum number of keys this node can hold.
        keys: Sorted list of keys.
        values: List of values parallel to `keys`.
        next: Pointer to the next leaf node (for range queries).
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.keys: List[Any] = []
        self.values: List[Any] = []
        self.next: Optional["LeafNode"] = None  # Link to next leaf

    def is_leaf(self) -> bool:
        return True

    def is_full(self) -> bool:
        return len(self.keys) >= self.capacity

    def __len__(self) -> int:
        return len(self.keys)

    def is_underfull(self) -> bool:
        """Check if leaf has fewer than minimum required keys."""
        min_keys = (self.capacity - 1) // 2
        return len(self.keys) < min_keys

    def can_donate(self) -> bool:
        """Check if leaf can give a key to a sibling (has more than minimum)."""
        min_keys = (self.capacity - 1) // 2
        return len(self.keys) > min_keys

    def borrow_from_left(self, left_sibling: "LeafNode") -> None:
        """Borrow the rightmost key-value from left sibling.

        Raises:
            ValueError: If the left sibling cannot spare a key.
        """
        if not left_sibling.can_donate():
            raise ValueError("Left sibling cannot donate")

        key = left_sibling.keys.pop()
        value = left_sibling.values.pop()
        self.keys.insert(0, key)
        self.values.insert(0, value)

    def borrow_from_right(self, right_sibling: "LeafNode") -> None:
        """Borrow the leftmost key-value from right sibling.

        Raises:
            ValueError: If the right sibling cannot spare a key.
        """
        if not right_sibling.can_donate():
            raise ValueError("Right sibling cannot donate")

        key = right_sibling.keys.pop(0)
        value = right_sibling.values.pop(0)
        self.keys.append(key)
        self.values.append(value)

    def merge_with_right(self, right_sibling: "LeafNode") -> None:
        """Absorb the right sibling's data and bypass it in the leaf chain.

        The caller is responsible for detaching `right_sibling` from its
        parent; this method only moves data and relinks `next`.
        """
        self.keys.extend(right_sibling.keys)
        self.values.extend(right_sibling.values)

        # Update linked list to skip the right sibling
        self.next = right_sibling.next

    def find_position(self, key: Any) -> Tuple[int, bool]:
        """
        Find where a key should be inserted.
        Returns (position, exists) where exists is True if key already exists.
        """
        # bisect gives a C-speed binary search over the sorted key list.
        pos = bisect.bisect_left(self.keys, key)
        exists = pos < len(self.keys) and self.keys[pos] == key
        return pos, exists

    def insert(self, key: Any, value: Any) -> Optional[Any]:
        """
        Insert a key-value pair. Returns old value if key exists.
        """
        pos, exists = self.find_position(key)

        if exists:
            # Update existing value in place.
            old_value = self.values[pos]
            self.values[pos] = value
            return old_value

        # New key: splice into both parallel lists at the sorted position.
        self.keys.insert(pos, key)
        self.values.insert(pos, value)
        return None

    def get(self, key: Any) -> Optional[Any]:
        """Get value for a key, returns None if not found.

        Note: a stored value of None is indistinguishable from "missing"
        in the return value; callers that must tell them apart should use
        find_position.
        """
        pos, exists = self.find_position(key)
        if exists:
            return self.values[pos]
        return None

    def delete(self, key: Any) -> Optional[Any]:
        """Delete a key, returns the value if found (None if absent).

        Note: as with get(), a stored None value is indistinguishable
        from "not found" in the return value; probe with find_position
        first when the distinction matters.
        """
        pos, exists = self.find_position(key)
        if exists:
            self.keys.pop(pos)
            return self.values.pop(pos)
        return None

    def split(self) -> "LeafNode":
        """Split this leaf node, returning the new right node."""
        mid = len(self.keys) // 2

        # Right half moves to a new leaf; left half stays here.
        new_leaf = LeafNode(self.capacity)
        new_leaf.keys = self.keys[mid:]
        new_leaf.values = self.values[mid:]
        self.keys = self.keys[:mid]
        self.values = self.values[:mid]

        # Insert the new leaf directly after this one in the chain.
        new_leaf.next = self.next
        self.next = new_leaf

        return new_leaf

    def split_and_insert(self, key: Any, value: Any) -> Tuple["LeafNode", Any]:
        """Split leaf and insert key-value, returning (new_leaf, separator_key)"""
        new_leaf = self.split()

        # The new pair goes to whichever side keeps ordering intact.
        if key < new_leaf.keys[0]:
            self.insert(key, value)
        else:
            new_leaf.insert(key, value)

        return new_leaf, new_leaf.keys[0]

    def find_leaf_for_key(self, _key: Any) -> "LeafNode":
        """Find the leaf node that contains or would contain the given key"""
        return self  # Leaf nodes return themselves

    def key_count(self) -> int:
        """Count all keys in this leaf and all following leaves.

        Iterative on purpose: the previous recursive implementation used
        one stack frame per leaf and could exceed CPython's default
        recursion limit (~1000) on trees with many leaves.
        """
        total = 0
        node: Optional["LeafNode"] = self
        while node is not None:
            total += len(node)
            node = node.next
        return total

class BranchNode(Node):
    """Internal (branch) node containing keys and child pointers.

    Branch nodes guide the search through the tree. They contain separator keys
    and pointers to child nodes. For n keys, there are n+1 children.

    Attributes:
        capacity: Maximum number of keys this node can hold.
        keys: Sorted list of separator keys.
        children: List of child nodes (leaves or other branches).

    Invariants:
        - len(children) == len(keys) + 1
        - All keys in children[i] < keys[i]
        - All keys in children[i+1] >= keys[i]
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.keys: List[Any] = []
        self.children: List[Node] = []

    def is_leaf(self) -> bool:
        return False

    def is_full(self) -> bool:
        return len(self.keys) >= self.capacity

    def __len__(self) -> int:
        return len(self.keys)

    def is_underfull(self) -> bool:
        """Check if branch has fewer than minimum required keys"""
        min_keys = (self.capacity - 1) // 2
        return len(self.keys) < min_keys

    def can_donate(self) -> bool:
        """Check if branch can give a key to a sibling (has more than minimum)"""
        min_keys = (self.capacity - 1) // 2
        return len(self.keys) > min_keys

    def borrow_from_left(self, left_sibling: "BranchNode", separator_key: Any) -> Any:
        """Borrow the rightmost key and child from left sibling, returns new separator.

        Raises:
            ValueError: if the left sibling has no key to spare.
        """
        if not left_sibling.can_donate():
            raise ValueError("Left sibling cannot donate")

        # Take the separator key as our leftmost key
        self.keys.insert(0, separator_key)

        # Take the rightmost child from left sibling
        child = left_sibling.children.pop()
        self.children.insert(0, child)

        # The rightmost key from left sibling becomes the new separator
        return left_sibling.keys.pop()

    def borrow_from_right(self, right_sibling: "BranchNode", separator_key: Any) -> Any:
        """Borrow the leftmost key and child from right sibling, returns new separator.

        Raises:
            ValueError: if the right sibling has no key to spare.
        """
        if not right_sibling.can_donate():
            raise ValueError("Right sibling cannot donate")

        # Take the separator key as our rightmost key
        self.keys.append(separator_key)

        # Take the leftmost child from right sibling
        child = right_sibling.children.pop(0)
        self.children.append(child)

        # The leftmost key from right sibling becomes the new separator
        return right_sibling.keys.pop(0)

    def merge_with_right(self, right_sibling: "BranchNode", separator_key: Any) -> None:
        """Merge this branch with its right sibling using the separator key.

        The separator from the parent is pulled down between the two key
        ranges; the right sibling's keys and children are appended.
        """
        # Add the separator key to this node's keys
        self.keys.append(separator_key)

        # Move all keys and children from right sibling to this node
        self.keys.extend(right_sibling.keys)
        self.children.extend(right_sibling.children)

    def find_child_index(self, key: Any) -> int:
        """Find which child a key should go to.

        Raises:
            ValueError: if the node's key/children counts violate the
                B+ tree invariants (corrupted structure).
        """
        # Validate node structure
        if len(self.children) == 0:
            raise ValueError("BranchNode has no children")
        if len(self.keys) != len(self.children) - 1:
            raise ValueError(
                f"Invalid branch structure: {len(self.keys)} keys, {len(self.children)} children"
            )

        # Use optimized bisect module for binary search
        # bisect_right returns the insertion point for key in keys
        # For B+ trees: if key <= separator, go left; if key > separator, go right
        index = bisect.bisect_right(self.keys, key)

        # Validate result
        if index >= len(self.children):
            raise ValueError(
                f"Child index {index} out of range (have {len(self.children)} children)"
            )

        return index

    def get_child(self, key: Any) -> Node:
        """Get the child node where a key would be found"""
        if not self.children:
            raise ValueError("BranchNode has no children - tree structure corrupted")
        index = self.find_child_index(key)
        if index >= len(self.children):
            raise ValueError(
                f"Child index {index} out of range (have {len(self.children)} children)"
            )
        return self.children[index]

    def split(self) -> Tuple["BranchNode", Any]:
        """Split this branch node.

        Returns:
            (new_right_branch, separator_key): the new right sibling and the
            middle key to promote into the parent.  Unlike a leaf split, the
            separator is removed from this node rather than copied, since
            branch keys are pure separators.
        """
        # Find the midpoint
        mid = len(self.keys) // 2

        # Create new branch for right half
        new_branch = BranchNode(self.capacity)

        # The middle key becomes the separator to be promoted
        separator_key = self.keys[mid]

        # Move right half of keys to new branch (excluding the middle key)
        new_branch.keys = self.keys[mid + 1 :]

        # Move corresponding children to new branch
        new_branch.children = self.children[mid + 1 :]

        # Keep left half in this branch
        self.keys = self.keys[:mid]
        self.children = self.children[: mid + 1]

        return new_branch, separator_key

    def insert_child_and_split_if_needed(
        self, child_index: int, separator_key: Any, new_child: "Node"
    ) -> Optional[Tuple["BranchNode", Any]]:
        """Insert separator and child, split if necessary. Returns None or (new_branch, promoted_key)"""
        # Insert the separator key and new child at the appropriate position
        self.keys.insert(child_index, separator_key)
        self.children.insert(child_index + 1, new_child)

        # If branch is not full after insertion, we're done
        if not self.is_full():
            return None

        # Branch is full, need to split
        return self.split()

    def find_leaf_for_key(self, key: Any) -> "LeafNode":
        """Find the leaf node that contains or would contain the given key"""
        child = self.get_child(key)
        return child.find_leaf_for_key(key)


================================================
FILE: python/bplustree_c_src/bplustree.h
================================================
/*
 * B+ Tree C Extension Header
 * 
 * Optimized C structures for high-performance B+ tree operations.
 * Uses single array layout for better cache locality.
 */

#ifndef BPLUSTREE_H
#define BPLUSTREE_H

#include <Python.h>
#include <stdint.h>
#include <stdbool.h>

/* Cache optimization support */
#ifdef __GNUC__
    #define LIKELY(x)   __builtin_expect(!!(x), 1)
    #define UNLIKELY(x) __builtin_expect(!!(x), 0)
    #define PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
#else
    #define LIKELY(x)   (x)
    #define UNLIKELY(x) (x)
    #define PREFETCH(addr, rw, locality) ((void)0)
#endif

/* Configuration constants */
#define DEFAULT_CAPACITY 8
#define MIN_CAPACITY 4
#define CACHE_LINE_SIZE 64

/* Node types */
typedef enum {
    NODE_LEAF = 0,
    NODE_BRANCH = 1
} NodeType;

/* Forward declarations */
typedef struct BPlusNode BPlusNode;
typedef struct BPlusTree BPlusTree;

/* 
 * Single array node structure optimized for cache locality.
 * Layout: [metadata][keys...][values/children...]
 * 
 * For leaf nodes: keys[0:capacity], values[capacity:capacity*2]
 * For branch nodes: keys[0:capacity], children[capacity:capacity*2+1]
 */
typedef struct BPlusNode {
    /* Metadata (fits in single cache line) */
    uint16_t num_keys;          /* Number of keys currently in node */
    uint16_t capacity;          /* Maximum keys this node can hold */
    NodeType type;              /* Leaf or branch node */
    uint8_t _unused;            /* Reserved for future use */
    uint8_t _padding[2];        /* Alignment padding */
    
    /* Links */
    struct BPlusNode *next;     /* Next leaf (for leaf nodes only) */

    /* Flexible array for keys and values/children (cache-line aligned) */
    /* Actual size allocated: capacity * 2 * sizeof(PyObject*) for leaves */
    /*                        (capacity * 2 + 1) * sizeof(PyObject*) for branches */
    /* Slot layout: keys occupy data[0..capacity); for leaves the values
     * occupy data[capacity..2*capacity); for branches the child pointers
     * occupy data[capacity..2*capacity] -- see node_get_value /
     * node_get_child below. */
    PyObject *data[] __attribute__((aligned(CACHE_LINE_SIZE)));
} BPlusNode;

/* B+ Tree structure */
typedef struct BPlusTree {
    PyObject_HEAD               /* Python object header */
    BPlusNode *root;           /* Root node */
    BPlusNode *leaves;         /* Leftmost leaf (for iteration) */
    uint16_t capacity;         /* Node capacity */
    uint16_t min_keys;         /* Minimum keys per node (capacity/2) */
    size_t size;               /* Total number of key-value pairs */
    size_t modification_count; /* Counter incremented on each tree modification;
                                * iterators snapshot it to detect mutation
                                * during iteration */
    
} BPlusTree;

/* Inline functions for fast array access */
/* Key slot i lives at data[i]. */
static inline PyObject* node_get_key(BPlusNode *node, int index) {
    return node->data[index];
}

/* Leaf value slot i lives at data[capacity + i].  This is the same region
 * branches use for children; a node is either a leaf or a branch, never
 * both, so the two never coexist. */
static inline PyObject* node_get_value(BPlusNode *node, int index) {
    return node->data[node->capacity + index];
}

/* Branch child slot i lives at data[capacity + i]; children are raw
 * BPlusNode pointers stored (cast) in the PyObject* array. */
static inline BPlusNode* node_get_child(BPlusNode *node, int index) {
    return (BPlusNode*)node->data[node->capacity + index];
}

static inline void node_set_key(BPlusNode *node, int index, PyObject *key) {
    node->data[index] = key;
}

static inline void node_set_value(BPlusNode *node, int index, PyObject *value) {
    node->data[node->capacity + index] = value;
}

static inline void node_set_child(BPlusNode *node, int index, BPlusNode *child) {
    node->data[node->capacity + index] = (PyObject*)child;
}

/* Prefetch child pointer for cache optimization */
/* NOTE: the actual PREFETCH is compiled in only when PREFETCH_HINTS is
 * defined; otherwise this is identical to node_get_child. */
static inline BPlusNode *node_prefetch_child(BPlusNode *node, int index) {
    BPlusNode *child = node_get_child(node, index);
#ifdef PREFETCH_HINTS
    PREFETCH(child, 0, 3);
#endif
    return child;
}

/* Function prototypes */

/* Fast comparison functions */
int fast_compare_lt(PyObject *a, PyObject *b);
int fast_compare_eq(PyObject *a, PyObject *b);

/* Cache optimization functions */
void* cache_aligned_alloc(size_t size);
void cache_aligned_free(void* ptr);

/* Node creation and destruction */
BPlusNode* node_create(NodeType type, uint16_t capacity);
void node_destroy(BPlusNode *node);

/* Node operations */
int node_find_position(BPlusNode *node, PyObject *key);
int node_insert_leaf(BPlusNode *node, PyObject *key, PyObject *value, 
                     BPlusNode **new_node, PyObject **split_key);
int node_insert_branch(BPlusNode *node, PyObject *key, BPlusNode *right_child,
                       BPlusNode **new_node, PyObject **split_key);
int node_delete(BPlusNode *node, PyObject *key);
PyObject* node_get(BPlusNode *node, PyObject *key);

/* Tree operations */
int tree_insert(BPlusTree *tree, PyObject *key, PyObject *value);
int tree_delete(BPlusTree *tree, PyObject *key);
PyObject* tree_get(BPlusTree *tree, PyObject *key);
BPlusNode* tree_find_leaf(BPlusTree *tree, PyObject *key);

/* Memory pool operations (removed) */

/* Utility functions */
void node_split_leaf(BPlusNode *node, BPlusNode *new_node);
void node_split_branch(BPlusNode *node, BPlusNode *new_node, PyObject **promoted_key);
int node_redistribute(BPlusNode *left, BPlusNode *right, PyObject *separator);
int node_merge(BPlusNode *left, BPlusNode *right, PyObject *separator);

/* Python C API functions */
PyObject* BPlusTree_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
int BPlusTree_init(BPlusTree *self, PyObject *args, PyObject *kwds);
void BPlusTree_dealloc(BPlusTree *self);
PyObject* BPlusTree_getitem(BPlusTree *self, PyObject *key);
int BPlusTree_setitem(BPlusTree *self, PyObject *key, PyObject *value);
int BPlusTree_delitem(BPlusTree *self, PyObject *key);
Py_ssize_t BPlusTree_length(BPlusTree *self);
int BPlusTree_contains(BPlusTree *self, PyObject *key);

#endif /* BPLUSTREE_H */

================================================
FILE: python/bplustree_c_src/bplustree_module.c
================================================
/*
 * B+ Tree Python Extension Module
 * 
 * Python C API implementation for high-performance B+ tree.
 */

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"
#include "bplustree.h"

/* GIL-release macros for pure-C lookup loops */
#define ENTER_TREE_LOOP Py_BEGIN_ALLOW_THREADS
#define EXIT_TREE_LOOP  Py_END_ALLOW_THREADS

/* GC clear/traverse prototypes */
static int BPlusTree_traverse(BPlusTree *self, visitproc visit, void *arg);
static int BPlusTree_clear(BPlusTree *self);

/* Method implementations */

PyObject *
BPlusTree_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {
    /* tp_new: allocate a GC-tracked tree shell with default fields;
     * the root node itself is created later in BPlusTree_init. */
    BPlusTree *self = PyObject_GC_New(BPlusTree, type);
    if (self == NULL) {
        return NULL;
    }

    self->root = NULL;
    self->leaves = NULL;
    self->capacity = DEFAULT_CAPACITY;
    self->min_keys = DEFAULT_CAPACITY / 2;
    self->size = 0;
    self->modification_count = 0;

    PyObject_GC_Track(self);
    return (PyObject *)self;
}

int
BPlusTree_init(BPlusTree *self, PyObject *args, PyObject *kwds) {
    /* tp_init: parse `capacity` and build the initial single-leaf tree.
     * Returns 0 on success, -1 with an exception set on failure. */
    static char *kwlist[] = {"capacity", NULL};
    int capacity = DEFAULT_CAPACITY;
    
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|i", kwlist, &capacity)) {
        return -1;
    }
    
    if (capacity < MIN_CAPACITY) {
        PyErr_Format(PyExc_ValueError, 
                     "capacity must be at least %d, got %d", 
                     MIN_CAPACITY, capacity);
        return -1;
    }
    
    /* __init__ may be invoked more than once from Python; release any
     * existing tree so re-initialization does not leak the old nodes,
     * and reset the bookkeeping counters. */
    if (self->root) {
        node_destroy(self->root);
        self->root = NULL;
        self->leaves = NULL;
    }
    self->size = 0;
    self->modification_count = 0;
    
    self->capacity = capacity;
    self->min_keys = capacity / 2;
    
    /* Create initial root (leaf) */
    self->root = node_create(NODE_LEAF, capacity);
    if (!self->root) {
        return -1;
    }
    self->leaves = self->root;
    
    return 0;
}

void
BPlusTree_dealloc(BPlusTree *self) {
    /* Stop GC tracking before tearing anything down. */
    PyObject_GC_UnTrack(self);
    /* Drop all contained Python references first.  node_gc_op's clear mode
     * uses Py_CLEAR, which NULLs each slot, so the node_destroy below sees
     * NULL keys/values and its Py_XDECREFs become harmless no-ops -- there
     * is no double decref. */
    BPlusTree_clear(self);
    if (self->root) {
        /* Frees the node memory itself, recursing through children. */
        node_destroy(self->root);
    }
    PyObject_GC_Del(self);
}

PyObject *
BPlusTree_getitem(BPlusTree *self, PyObject *key) {
    /* Direct lookup without releasing the GIL to avoid unsafe Python API use */
    /* mp_subscript: returns a new reference to the value; tree_get is
     * expected to leave KeyError set when the key is absent (node_get
     * does) -- NOTE(review): confirm tree_get's error contract in tree_ops.c. */
    return tree_get(self, key);
}

int
BPlusTree_setitem(BPlusTree *self, PyObject *key, PyObject *value) {
    /* mp_ass_subscript: a NULL value means `del tree[key]`. */
    return value ? tree_insert(self, key, value)
                 : BPlusTree_delitem(self, key);
}

int
BPlusTree_delitem(BPlusTree *self, PyObject *key) {
    /* Delete `key`; raise KeyError when absent.  Returns 0/-1. */
    switch (tree_delete(self, key)) {
    case -1:
        return -1;  /* error already set by tree_delete */
    case 0:
        /* Key was not present. */
        PyErr_SetObject(PyExc_KeyError, key);
        return -1;
    default:
        /* Bump the counter so live iterators detect the mutation. */
        self->modification_count++;
        return 0;
    }
}

Py_ssize_t
BPlusTree_length(BPlusTree *self) {
    /* __len__: number of key/value pairs currently stored. */
    return (Py_ssize_t)self->size;
}

int
BPlusTree_contains(BPlusTree *self, PyObject *key) {
    /* sq_contains: 1 if key present, 0 if absent, -1 on error.
     * Lookup is done without releasing the GIL. */
    PyObject *value = tree_get(self, key);
    if (value) {
        Py_DECREF(value);
        return 1;
    }
    if (PyErr_Occurred()) {
        /* Only a missing key means "not contained".  Any other exception
         * (e.g. TypeError from comparing incompatible key types) must
         * propagate instead of being silently swallowed. */
        if (!PyErr_ExceptionMatches(PyExc_KeyError)) {
            return -1;
        }
        PyErr_Clear();
    }
    return 0;
}

/* Iterator implementation */

typedef struct {
    PyObject_HEAD
    BPlusTree *tree;            /* owned reference; keeps the tree alive */
    BPlusNode *current_node;    /* leaf being walked; NULL means exhausted */
    int current_index;          /* next slot to yield within current_node */
    int include_values;  /* 0 for keys(), 1 for items() */
    size_t modification_count;  /* Track tree modifications */
} BPlusTreeIterator;

static void
BPlusTreeIterator_dealloc(BPlusTreeIterator *self) {
    /* Release the strong reference to the tree taken at iterator creation. */
    Py_XDECREF(self->tree);
    Py_TYPE(self)->tp_free((PyObject *)self);
}

static PyObject *
BPlusTreeIterator_next(BPlusTreeIterator *self) {
    /* tp_iternext: yield the next key (or (key, value) tuple when
     * include_values is set), walking the leaf chain left to right.
     * Returns NULL with StopIteration when exhausted, NULL with
     * RuntimeError if the tree was mutated since iterator creation. */

    /* Check if the tree has been modified since iterator creation */
    if (self->modification_count != self->tree->modification_count) {
        PyErr_SetString(PyExc_RuntimeError, 
                       "tree changed size during iteration");
        return NULL;
    }
    
    if (!self->current_node) {
        PyErr_SetNone(PyExc_StopIteration);
        return NULL;
    }
    
    /* Handle empty leaves at the beginning or during traversal */
    while (self->current_node && self->current_node->num_keys == 0) {
        self->current_node = self->current_node->next;
    }
    
    if (!self->current_node) {
        PyErr_SetNone(PyExc_StopIteration);
        return NULL;
    }
    
    if (self->current_index >= self->current_node->num_keys) {
        /* Move to next leaf, skipping empty ones */
        self->current_node = self->current_node->next;
        while (self->current_node && self->current_node->num_keys == 0) {
            self->current_node = self->current_node->next;
        }
        
        if (!self->current_node) {
            PyErr_SetNone(PyExc_StopIteration);
            return NULL;
        }
        
        self->current_index = 0;
    }
    
    /* Borrowed reference from the node slot; INCREF'd before returning. */
    PyObject *key = node_get_key(self->current_node, self->current_index);
    
    if (self->include_values) {
        PyObject *value = node_get_value(self->current_node, self->current_index);
        PyObject *tuple = PyTuple_New(2);
        if (!tuple) return NULL;
        
        /* PyTuple_SET_ITEM steals references, hence the INCREFs. */
        Py_INCREF(key);
        Py_INCREF(value);
        PyTuple_SET_ITEM(tuple, 0, key);
        PyTuple_SET_ITEM(tuple, 1, value);
        self->current_index++;
        return tuple;
    } else {
        self->current_index++;
        Py_INCREF(key);
        return key;
    }
}

/* Iterator type: instances are created only via keys()/items()/iter();
 * no tp_new is exposed, so it is not constructible from Python. */
static PyTypeObject BPlusTreeIteratorType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "bplustree_c.BPlusTreeIterator",
    .tp_basicsize = sizeof(BPlusTreeIterator),
    .tp_itemsize = 0,
    .tp_dealloc = (destructor)BPlusTreeIterator_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc =
        "B+ tree iterator; generate keys or (key, value) pairs\n"
        "depending on invocation via keys() or items()",
    .tp_iter = PyObject_SelfIter,
    .tp_iternext = (iternextfunc)BPlusTreeIterator_next,
};


static PyObject *
BPlusTree_iter(BPlusTree *self) {
    /* tp_iter: create a key iterator positioned at the leftmost leaf. */
    BPlusTreeIterator *it = PyObject_New(BPlusTreeIterator, &BPlusTreeIteratorType);
    if (it == NULL) {
        return NULL;
    }

    Py_INCREF(self);
    it->tree = self;

    /* Descend along the leftmost edge from the root to the first leaf. */
    BPlusNode *leaf = self->root;
    while (leaf && leaf->type == NODE_BRANCH) {
        leaf = node_get_child(leaf, 0);
    }

    it->current_node = leaf;
    it->current_index = 0;
    it->include_values = 0;  /* plain key iteration */
    /* Snapshot so the iterator can detect later mutations. */
    it->modification_count = self->modification_count;

    return (PyObject *)it;
}

static PyObject *
BPlusTree_keys(BPlusTree *self, PyObject *Py_UNUSED(ignored)) {
    /* keys() is the same iteration as iter(tree): keys in ascending order. */
    return BPlusTree_iter(self);
}

static PyObject *
BPlusTree_items(BPlusTree *self, PyObject *Py_UNUSED(args)) {
    /* items(): identical setup to BPlusTree_iter (leftmost leaf, index 0,
     * modification-count snapshot), differing only in include_values.
     * Delegate instead of duplicating the traversal logic. */
    BPlusTreeIterator *iter = (BPlusTreeIterator *)BPlusTree_iter(self);
    if (!iter) {
        return NULL;
    }
    iter->include_values = 1;  /* yield (key, value) tuples */
    return (PyObject *)iter;
}


/* Method definitions */

static PyMethodDef BPlusTree_methods[] = {
    {"keys", (PyCFunction)BPlusTree_keys, METH_NOARGS,
     "Return an iterator over the tree's keys"},
    /* items() takes no arguments; METH_NOARGS makes it consistent with
     * keys() and rejects stray positional arguments instead of silently
     * ignoring them (the implementation never reads args). */
    {"items", (PyCFunction)BPlusTree_items, METH_NOARGS,
     "Return an iterator over the tree's (key, value) pairs"},
    {NULL, NULL, 0, NULL}  /* Sentinel */
};

/* Mapping protocol */

/* Mapping protocol table: len(t), t[k], and t[k] = v / del t[k]. */
static PyMappingMethods BPlusTree_as_mapping = {
    (lenfunc)BPlusTree_length,      /* mp_length */
    (binaryfunc)BPlusTree_getitem,  /* mp_subscript */
    (objobjargproc)BPlusTree_setitem /* mp_ass_subscript */
};

/* Module-level methods for testing and diagnostics */
static PyObject *
py_check_data_alignment(PyObject *self, PyObject *args)
{
    /* Diagnostic: allocate a throwaway leaf and report whether its data
     * array landed on a cache-line boundary. */
    unsigned int capacity = DEFAULT_CAPACITY;
    if (!PyArg_ParseTuple(args, "|I", &capacity)) {
        return NULL;
    }

    BPlusNode *probe = node_create(NODE_LEAF, capacity);
    if (probe == NULL) {
        return NULL;
    }
    uintptr_t data_addr = (uintptr_t)probe->data;
    node_destroy(probe);

    return PyBool_FromLong(data_addr % CACHE_LINE_SIZE == 0);
}

/* Module-level functions (testing/diagnostics only). */
static PyMethodDef module_methods[] = {
    {"_check_data_alignment", py_check_data_alignment, METH_VARARGS,
     "Return True if node->data is aligned to CACHE_LINE_SIZE (optional capacity)"},
    {NULL, NULL, 0, NULL}
};

/* Sequence protocol (for 'in' operator) */

/* Sequence protocol table: only sq_contains is provided, so `key in tree`
 * uses the fast C lookup rather than iterating. */
static PySequenceMethods BPlusTree_as_sequence = {
    0,                          /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    0,                          /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)BPlusTree_contains, /* sq_contains */
};

/* Common GC operation: traverse or clear Python references in a node and its children. */
static int
node_gc_op(BPlusNode *node, visitproc visit, void *arg, int clear)
{
    /* Shared body for tp_traverse (clear == 0) and tp_clear (clear == 1).
     * Traverse: Py_VISIT every contained key/value; returns the first
     * nonzero visit result as -1.  Clear: Py_CLEAR every key/value slot
     * (decref + NULL) -- child BPlusNode structs are NOT freed here, only
     * their contained references; node_destroy frees the memory later. */
    if (!node) {
        return 0;
    }
    for (int i = 0; i < node->num_keys; i++) {
        if (clear) {
            Py_CLEAR(node->data[i]);
        } else {
            Py_VISIT(node_get_key(node, i));
        }
    }
    if (node->type == NODE_LEAF) {
        for (int i = 0; i < node->num_keys; i++) {
            if (clear) {
                Py_CLEAR(node->data[node->capacity + i]);
            } else {
                Py_VISIT(node_get_value(node, i));
            }
        }
    } else {
        /* Children are raw C structs, not Python objects, so they cannot
         * be Py_VISITed directly; recurse to reach the objects they hold. */
        for (int i = 0; i <= node->num_keys; i++) {
            BPlusNode *child = node_get_child(node, i);
            if (clear) {
                node_gc_op(child, NULL, NULL, 1);
            } else if (child && node_gc_op(child, visit, arg, 0)) {
                return -1;
            }
        }
    }
    return 0;
}

static int
node_traverse(BPlusNode *node, visitproc visit, void *arg)
{
    /* GC traverse: visit every Python object reachable from this subtree. */
    return node_gc_op(node, visit, arg, /*clear=*/0);
}

static int
node_clear_gc(BPlusNode *node)
{
    /* GC clear: drop every Python reference held by this subtree. */
    return node_gc_op(node, NULL, NULL, /*clear=*/1);
}


static int
BPlusTree_traverse(BPlusTree *self, visitproc visit, void *arg) {
    /* tp_traverse: expose all contained keys/values to the cycle collector. */
    if (self->root && node_traverse(self->root, visit, arg) != 0) {
        return -1;
    }
    return 0;
}


static int
BPlusTree_clear(BPlusTree *self) {
    /* tp_clear: drop every contained Python reference.  Node memory itself
     * is released later by node_destroy in dealloc. */
    if (self->root) {
        node_clear_gc(self->root);
    }
    return 0;
}

/* Type definition */

/* The main BPlusTree type: a GC-aware mapping with iteration support. */
static PyTypeObject BPlusTreeType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "bplustree_c.BPlusTree",
    .tp_doc =
        "High-performance B+ tree implementation\n"
        "\n"
        "Mapping interface:\n"
        "  __getitem__(key) -> value\n"
        "  __setitem__(key, value)\n"
        "  __delitem__(key)\n"
        "  __contains__(key) -> bool\n"
        "  __len__() -> int\n"
        "  keys() -> iterator of keys\n"
        "  items() -> iterator of (key, value) pairs",
    .tp_basicsize = sizeof(BPlusTree),
    .tp_itemsize = 0,
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
    .tp_new = BPlusTree_new,
    .tp_init = (initproc)BPlusTree_init,
    .tp_dealloc = (destructor)BPlusTree_dealloc,
    .tp_traverse = (traverseproc)BPlusTree_traverse,
    .tp_clear = (inquiry)BPlusTree_clear,
    .tp_as_mapping = &BPlusTree_as_mapping,
    .tp_as_sequence = &BPlusTree_as_sequence,
    .tp_methods = BPlusTree_methods,
    .tp_iter = (getiterfunc)BPlusTree_iter,
};

/* Module definition */

/* Module definition; m_size = -1 means module state is global (no
 * sub-interpreter support). */
static PyModuleDef bplustree_module = {
    PyModuleDef_HEAD_INIT,
    .m_name = "bplustree_c",
    .m_doc =
        "High-performance B+ tree C extension supporting mapping interface:\n"
        "efficient insertion, deletion, lookup, and range scans",
    .m_size = -1,
    .m_methods = module_methods,
};

PyMODINIT_FUNC
PyInit_bplustree_c(void) {
    /* Module entry point: finalize both types, create the module object,
     * and expose BPlusTree (the iterator type stays internal). */
    if (PyType_Ready(&BPlusTreeType) < 0 ||
        PyType_Ready(&BPlusTreeIteratorType) < 0) {
        return NULL;
    }

    PyObject *module = PyModule_Create(&bplustree_module);
    if (module == NULL) {
        return NULL;
    }

    /* PyModule_AddObject steals a reference on success only. */
    Py_INCREF(&BPlusTreeType);
    if (PyModule_AddObject(module, "BPlusTree", (PyObject *)&BPlusTreeType) < 0) {
        Py_DECREF(&BPlusTreeType);
        Py_DECREF(module);
        return NULL;
    }

    return module;
}

================================================
FILE: python/bplustree_c_src/node_ops.c
================================================
/*
 * B+ Tree Node Operations
 * 
 * Core node operations optimized for performance.
 * Uses vectorized search where possible.
 */

#include "bplustree.h"
#include <string.h>
#include <stdlib.h>

#ifdef _WIN32
#include <malloc.h>
#endif

/* Fast comparison function with type-specific optimizations */
/* Returns 1 if a < b, 0 if not, and a negative value on comparison error
 * (via PyObject_RichCompareBool's contract on the fallback path). */
int fast_compare_lt(PyObject *a, PyObject *b) {
    /* Fast path for integers */
    if (PyLong_CheckExact(a) && PyLong_CheckExact(b)) {
        /* For small integers, use direct comparison */
        /* PyLong_AsLong sets OverflowError for values outside `long`;
         * the PyErr_Occurred check detects that and falls through to the
         * generic comparison below. */
        long val_a = PyLong_AsLong(a);
        long val_b = PyLong_AsLong(b);
        if (!PyErr_Occurred()) {
            return val_a < val_b ? 1 : 0;
        }
        PyErr_Clear(); /* Clear error and fall through */
    }
    
    /* Fast path for strings */
    if (PyUnicode_CheckExact(a) && PyUnicode_CheckExact(b)) {
        /* PyUnicode_Compare returns -1 both for "a < b" and for errors;
         * disambiguate via PyErr_Occurred. */
        int result = PyUnicode_Compare(a, b);
        if (result != -1 || !PyErr_Occurred()) {
            return result < 0 ? 1 : 0;
        }
        PyErr_Clear(); /* Clear error and fall through */
    }
    
    /* Fall back to general comparison */
    return PyObject_RichCompareBool(a, b, Py_LT);
}

/* Fast equality comparison function */
/* Fast equality comparison function */
/* Returns 1 if a == b, 0 if not, and a negative value on comparison error
 * (via PyObject_RichCompareBool's contract on the fallback path). */
int fast_compare_eq(PyObject *a, PyObject *b) {
    /* Fast path for integers */
    if (PyLong_CheckExact(a) && PyLong_CheckExact(b)) {
        /* PyLong_AsLong overflows for big ints; detected via PyErr_Occurred
         * and handled by falling through to the generic path. */
        long val_a = PyLong_AsLong(a);
        long val_b = PyLong_AsLong(b);
        if (!PyErr_Occurred()) {
            return val_a == val_b ? 1 : 0;
        }
        PyErr_Clear();
    }
    
    /* Fast path for strings */
    if (PyUnicode_CheckExact(a) && PyUnicode_CheckExact(b)) {
        /* -1 is ambiguous (less-than vs error); disambiguate via
         * PyErr_Occurred. */
        int result = PyUnicode_Compare(a, b);
        if (result != -1 || !PyErr_Occurred()) {
            return result == 0 ? 1 : 0;
        }
        PyErr_Clear();
    }
    
    /* Fall back to general comparison */
    return PyObject_RichCompareBool(a, b, Py_EQ);
}

/* Binary search to find position for key */
/* Binary search: index of the first key in `node` that is >= `key`
 * (lower bound).  Returns node->num_keys if all keys are smaller, or -1
 * if a key comparison raised an exception. */
int node_find_position(BPlusNode *node, PyObject *key) {
    int lo = 0;
    int hi = node->num_keys;

    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        int is_less = fast_compare_lt(node_get_key(node, mid), key);

        if (is_less < 0) {
            return -1;  /* comparison raised */
        }
        if (is_less) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }

    return lo;
}

/* Create a new node */
/* Allocate and zero-initialize a cache-aligned node.  Returns NULL with
 * MemoryError set on allocation failure. */
BPlusNode* node_create(NodeType type, uint16_t capacity) {
    /* Leaves store capacity keys + capacity values; branches store
     * capacity keys + (capacity + 1) child pointers. */
    size_t slots = (type == NODE_LEAF) ? (size_t)capacity * 2
                                       : (size_t)capacity * 2 + 1;
    size_t data_size = slots * sizeof(PyObject*);

    BPlusNode *node = (BPlusNode*)cache_aligned_alloc(sizeof(BPlusNode) + data_size);
    if (node == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    node->num_keys = 0;
    node->capacity = capacity;
    node->type = type;
    node->_unused = 0;  /* reserved */
    node->next = NULL;

    /* All key/value/child slots start out NULL. */
    memset(node->data, 0, data_size);

    return node;
}

/* Destroy a node and decref all Python objects */
void node_destroy(BPlusNode *node) {
    if (!node) return;
    
    /* Decref all keys */
    for (int i = 0; i < node->num_keys; i++) {
        Py_XDECREF(node_get_key(node, i));
    }
    
    if (node->type == NODE_LEAF) {
        /* Decref all values */
        for (int i = 0; i < node->num_keys; i++) {
            Py_XDECREF(node_get_value(node, i));
        }
    } else {
        /* Recursively destroy children */
        for (int i = 0; i <= node->num_keys; i++) {
            BPlusNode *child = node_get_child(node, i);
            if (child) {
                node_destroy(child);
            }
        }
    }
    
    cache_aligned_free(node);
}

/* Clear a single slot: decref or destroy payload and null out key/value or child pointer */
/* Release whatever slot `i` holds and NULL it out.
 * Leaf: decrefs the key and value.  Branch: DESTROYS the entire child
 * subtree in that slot, so callers must only use this on branch slots
 * whose subtree is genuinely being discarded. */
static void node_clear_slot(BPlusNode *node, int i) {
    if (i < 0 || i >= node->capacity) {
        return;  /* Invalid index */
    }
    
    if (node->type == NODE_LEAF) {
        Py_XDECREF(node_get_key(node, i));
        Py_XDECREF(node_get_value(node, i));
        node_set_key(node, i, NULL);
        node_set_value(node, i, NULL);
    } else {
        /* For branch nodes, we only clear during deletion operations
         * where it's safe to destroy the child subtree */
        BPlusNode *child = node_get_child(node, i);
        if (child) {
            node_destroy(child);
        }
        Py_XDECREF(node_get_key(node, i));
        node_set_key(node, i, NULL);
        node_set_child(node, i, NULL);
    }
}

/* Insert into leaf node */
/* Insert (key, value) into a leaf.
 * Returns:  0  plain insert,
 *           1  the leaf split (*new_node and *split_key are set;
 *              *split_key is a new reference owned by the caller),
 *          -2  key already existed and its value was replaced,
 *          -1  error (exception set).
 */
int node_insert_leaf(BPlusNode *node, PyObject *key, PyObject *value, 
                     BPlusNode **new_node, PyObject **split_key) {
    int pos = node_find_position(node, key);
    if (pos < 0) return -1;  /* Comparison error */
    
    /* Check if key already exists */
    if (pos < node->num_keys) {
        PyObject *existing_key = node_get_key(node, pos);
        int cmp = fast_compare_eq(existing_key, key);
        if (cmp < 0) return -1;  /* Comparison error */
        
        if (cmp) {
            /* Update existing value in place. */
            PyObject *old_value = node_get_value(node, pos);
            Py_INCREF(value);
            node_set_value(node, pos, value);
            Py_DECREF(old_value);
            return -2;  /* Special return code for update */
        }
    }
    
    /* Check if split is needed */
    if (node->num_keys >= node->capacity) {
        /* Create new node */
        *new_node = node_create(NODE_LEAF, node->capacity);
        if (!*new_node) return -1;
        
        /* Temporary arrays for redistribution */
        PyObject **temp_keys = PyMem_Malloc((node->capacity + 1) * sizeof(PyObject*));
        PyObject **temp_values = PyMem_Malloc((node->capacity + 1) * sizeof(PyObject*));
        if (!temp_keys || !temp_values) {
            PyMem_Free(temp_keys);
            PyMem_Free(temp_values);
            node_destroy(*new_node);
            PyErr_NoMemory();
            return -1;
        }
        
        /* Build the merged, ordered sequence in the temp arrays.  Every
         * entry is an OWNED reference: existing items transfer the single
         * reference their old slot held, and the new key/value take the
         * fresh references acquired here.  (Previously both halves did an
         * extra Py_INCREF while the old slot references were never
         * released, leaking one reference per pre-existing item on every
         * split.) */
        Py_INCREF(key);
        Py_INCREF(value);
        int j = 0;
        for (int i = 0; i < pos; i++) {
            temp_keys[j] = node_get_key(node, i);
            temp_values[j] = node_get_value(node, i);
            j++;
        }
        temp_keys[j] = key;
        temp_values[j] = value;
        j++;
        for (int i = pos; i < node->num_keys; i++) {
            temp_keys[j] = node_get_key(node, i);
            temp_values[j] = node_get_value(node, i);
            j++;
        }
        
        /* Split at midpoint - exactly like Python code */
        int mid = node->capacity / 2;  /* Same as Python: self.capacity // 2 */

        /* Left half stays in this node.  The stores transfer ownership
         * from the temp arrays -- no INCREF. */
        node->num_keys = mid;
        for (int i = 0; i < mid; i++) {
            node_set_key(node, i, temp_keys[i]);
            node_set_value(node, i, temp_values[i]);
        }

        /* NULL the vacated slots; their references now live in temp_*. */
        for (int i = mid; i < node->capacity; i++) {
            node_set_key(node, i, NULL);
            node_set_value(node, i, NULL);
        }

        /* Right half moves to the new node (ownership transfer again). */
        int total_items = node->capacity + 1;
        (*new_node)->num_keys = total_items - mid;
        for (int i = 0; i < (*new_node)->num_keys; i++) {
            node_set_key(*new_node, i, temp_keys[mid + i]);
            node_set_value(*new_node, i, temp_values[mid + i]);
        }
        
        /* Splice the new leaf into the linked list. */
        (*new_node)->next = node->next;
        node->next = *new_node;
        
        /* The parent's separator is the right node's first key; hand the
         * caller its own reference. */
        *split_key = node_get_key(*new_node, 0);
        Py_INCREF(*split_key);
        
        /* Clean up temps */
        PyMem_Free(temp_keys);
        PyMem_Free(temp_values);
        
        return 1;  /* Split occurred */
    }
    
    /* Normal insert - shift elements right (pointer moves, no refcount
     * changes: ownership travels with the pointer). */
    for (int i = node->num_keys; i > pos; i--) {
        node_set_key(node, i, node_get_key(node, i - 1));
        node_set_value(node, i, node_get_value(node, i - 1));
    }
    
    /* Insert new key-value with fresh references. */
    Py_INCREF(key);
    Py_INCREF(value);
    node_set_key(node, pos, key);
    node_set_value(node, pos, value);
    node->num_keys++;
    
    return 0;  /* No split */
}

/* Delete key from leaf node */
/* Delete `key` from a leaf node.
 *
 * Returns 1 when the key was removed, 0 when the key is absent (or the
 * node is not a leaf), and -1 when a key comparison raised an error.
 */
int node_delete(BPlusNode *node, PyObject *key) {
    int pos, cmp, i;

    /* Direct deletion is only defined for leaves. */
    if (node->type != NODE_LEAF) {
        return 0;
    }

    pos = node_find_position(node, key);
    if (pos < 0) {
        return -1;  /* Comparison raised */
    }
    if (pos >= node->num_keys) {
        return 0;  /* Key is greater than every stored key */
    }

    cmp = fast_compare_eq(node_get_key(node, pos), key);
    if (cmp < 0) {
        return -1;  /* Comparison raised */
    }
    if (cmp == 0) {
        return 0;  /* Slot holds a different key */
    }

    /* Release the references held by the doomed slot first, then close
     * the gap by sliding the tail one position to the left. */
    node_clear_slot(node, pos);
    for (i = pos + 1; i < node->num_keys; i++) {
        node_set_key(node, i - 1, node_get_key(node, i));
        node_set_value(node, i - 1, node_get_value(node, i));
    }

    /* The last occupied slot is now a stale duplicate; NULL it out. */
    node->num_keys--;
    node_set_key(node, node->num_keys, NULL);
    node_set_value(node, node->num_keys, NULL);

    return 1;  /* Successfully deleted */
}

/* Get value from leaf node */
/* Look up `key` in a leaf node.
 *
 * Returns a NEW reference to the stored value; on a miss sets KeyError
 * and returns NULL (or returns NULL with the comparison error set).
 */
PyObject* node_get(BPlusNode *node, PyObject *key) {
    int pos = node_find_position(node, key);
    if (pos < 0) {
        return NULL;  /* Comparison raised */
    }

    if (pos < node->num_keys) {
        int eq = fast_compare_eq(node_get_key(node, pos), key);
        if (eq < 0) {
            return NULL;  /* Comparison raised */
        }
        if (eq) {
            PyObject *result = node_get_value(node, pos);
            Py_INCREF(result);
            return result;
        }
    }

    /* Key not present in this leaf. */
    PyErr_SetObject(PyExc_KeyError, key);
    return NULL;
}

/* Cache-aligned memory allocation functions */
/* Allocate `size` bytes aligned to CACHE_LINE_SIZE.
 *
 * Returns NULL on failure. The result must be released through
 * cache_aligned_free() so the Windows allocator is matched correctly.
 */
void* cache_aligned_alloc(size_t size) {
#ifdef _WIN32
    return _aligned_malloc(size, CACHE_LINE_SIZE);
#else
    void *ptr = NULL;
    int rc = posix_memalign(&ptr, CACHE_LINE_SIZE, size);
    return (rc == 0) ? ptr : NULL;
#endif
}

/* Release a pointer obtained from cache_aligned_alloc().
 * On Windows, _aligned_malloc memory must go through _aligned_free;
 * posix_memalign memory is released with plain free(). */
void cache_aligned_free(void* ptr) {
#ifdef _WIN32
    _aligned_free(ptr);
#else
    free(ptr);
#endif
}

================================================
FILE: python/bplustree_c_src/tree_ops.c
================================================
/*
 * B+ Tree Operations
 * 
 * High-level tree operations that coordinate node operations.
 */

#include "bplustree.h"

/* Find leaf node that should contain the key */
/* Descend from the root to the leaf whose key range covers `key`.
 * Returns NULL when a key comparison raises (exception set). */
BPlusNode* tree_find_leaf(BPlusTree *tree, PyObject *key) {
    BPlusNode *cur = tree->root;

    while (cur->type == NODE_BRANCH) {
        int slot = node_find_position(cur, key);
        if (slot < 0) {
            return NULL;  /* Comparison raised */
        }
        /* bisect_right semantics: an exact match with a separator key
         * routes to the child on its right. */
        if (slot < cur->num_keys) {
            int eq = fast_compare_eq(node_get_key(cur, slot), key);
            if (eq < 0) {
                return NULL;  /* Comparison raised */
            }
            if (eq) {
                slot++;
            }
        }
        /* Defensive: a branch with num_keys keys has num_keys+1 children. */
        if (slot > cur->num_keys) {
            return NULL;
        }
        cur = node_prefetch_child(cur, slot);
    }

    return cur;
}

/* Recursive insert helper */
/* Recursively insert (key, value) into the subtree rooted at `node`.
 *
 * Return codes follow node_insert_leaf / node_insert_branch:
 *   1  this node split; *new_node / *split_key describe the new sibling
 *   0  inserted without splitting
 *  -1  error (Python exception set)
 *  -2  existing key updated in place (see tree_insert)
 */
static int tree_insert_recursive(BPlusNode *node, PyObject *key, PyObject *value,
                                BPlusNode **new_node, PyObject **split_key) {
    int slot, status;
    BPlusNode *child;
    BPlusNode *child_sibling = NULL;
    PyObject *promoted = NULL;

    if (node->type == NODE_LEAF) {
        return node_insert_leaf(node, key, value, new_node, split_key);
    }

    /* Locate the child subtree for `key` (bisect_right: equal keys go
     * to the right of the separator). */
    slot = node_find_position(node, key);
    if (slot < 0) {
        return -1;
    }
    if (slot < node->num_keys) {
        int eq = fast_compare_eq(node_get_key(node, slot), key);
        if (eq < 0) {
            return -1;
        }
        if (eq) {
            slot++;
        }
    }

    child = node_get_child(node, slot);
    status = tree_insert_recursive(child, key, value, &child_sibling, &promoted);
    if (status <= 0) {
        return status;  /* Error (-1), update (-2), or no split (0) */
    }

    /* The child split: absorb the promoted separator and new sibling here. */
    return node_insert_branch(node, promoted, child_sibling, new_node, split_key);
}

/* Insert key-value pair into tree */
/* Insert a key-value pair into the tree, growing a new root when the
 * old root splits. Returns 0 on success (insert or update), -1 on error. */
int tree_insert(BPlusTree *tree, PyObject *key, PyObject *value) {
    BPlusNode *sibling = NULL;
    PyObject *separator = NULL;
    int status = tree_insert_recursive(tree->root, key, value, &sibling, &separator);

    if (status == -1) {
        return -1;  /* Exception already set */
    }
    if (status == -2) {
        /* Existing key overwritten: size is unchanged. */
        tree->modification_count++;
        return 0;
    }

    if (status > 0) {
        /* The root itself split: grow the tree by one level. */
        BPlusNode *new_root = node_create(NODE_BRANCH, tree->capacity);
        if (new_root == NULL) {
            /* NOTE(review): `sibling` is not destroyed here — if a leaf
             * split it is already linked into the leaf chain, so freeing
             * it would corrupt the tree; this failure path leaks it. */
            Py_XDECREF(separator);
            return -1;
        }

        node_set_child(new_root, 0, tree->root);
        node_set_key(new_root, 0, separator);  /* Stores the ref produced by the split */
        node_set_child(new_root, 1, sibling);
        new_root->num_keys = 1;

        tree->root = new_root;
    }

    /* A brand-new key was stored (status == 0 or status > 0). */
    tree->size++;
    tree->modification_count++;

    return 0;
}

/* Delete key from tree */
/* Remove `key` from the tree.
 * Returns node_delete's result (1 deleted, 0 not found, -1 error);
 * also -1 when descending to the leaf fails. */
int tree_delete(BPlusTree *tree, PyObject *key) {
    int outcome;
    BPlusNode *leaf = tree_find_leaf(tree, key);

    if (leaf == NULL) {
        return -1;
    }

    outcome = node_delete(leaf, key);
    if (outcome == 1) {
        /* Only an actual removal changes size and the modification count. */
        tree->size--;
        tree->modification_count++;
    }
    return outcome;
}

/* Get value for key */
/* Fetch the value stored under `key`.
 * Returns a new reference, or NULL with an exception set. */
PyObject* tree_get(BPlusTree *tree, PyObject *key) {
    BPlusNode *leaf = tree_find_leaf(tree, key);
    return (leaf != NULL) ? node_get(leaf, key) : NULL;
}

/* Insert into branch node */
/* Insert a (separator key, right child) pair into a branch node.
 *
 * Called after a child of this node split: `key` is the separator promoted
 * from the child and `right_child` is the child's new right sibling.
 *
 * Returns:
 *   0  pair inserted, no split
 *   1  this node split; *new_node is the new right sibling and *split_key
 *      (a new reference) is the key to promote to the parent
 *  -1  error (Python exception set)
 */
int node_insert_branch(BPlusNode *node, PyObject *key, BPlusNode *right_child,
                       BPlusNode **new_node, PyObject **split_key) {
    int pos = node_find_position(node, key);
    if (pos < 0) return -1;
    
    /* Check if split is needed */
    if (node->num_keys >= node->capacity) {
        /* Create new node */
        *new_node = node_create(NODE_BRANCH, node->capacity);
        if (!*new_node) return -1;
        
        /* Temporary arrays for redistribution: capacity+1 keys and
         * capacity+2 children (existing contents plus the new pair).
         * Entries are borrowed references. */
        PyObject **temp_keys = PyMem_Malloc((node->capacity + 1) * sizeof(PyObject*));
        BPlusNode **temp_children = PyMem_Malloc((node->capacity + 2) * sizeof(BPlusNode*));
        if (!temp_keys || !temp_children) {
            PyMem_Free(temp_keys);      /* PyMem_Free(NULL) is a no-op */
            PyMem_Free(temp_children);
            node_destroy(*new_node);
            PyErr_NoMemory();
            return -1;
        }
        
        /* Copy existing + new into temp arrays, splicing `key` and
         * `right_child` in at position `pos`. */
        temp_children[0] = node_get_child(node, 0);
        
        int j = 0;
        for (int i = 0; i < pos; i++) {
            temp_keys[j] = node_get_key(node, i);
            temp_children[j + 1] = node_get_child(node, i + 1);
            j++;
        }
        temp_keys[j] = key;
        temp_children[j + 1] = right_child;
        j++;
        for (int i = pos; i < node->num_keys; i++) {
            temp_keys[j] = node_get_key(node, i);
            temp_children[j + 1] = node_get_child(node, i + 1);
            j++;
        }
        
        /* Split at midpoint: the middle key is promoted upward and is not
         * copied into either half (standard B+ branch split). */
        int mid = node->capacity / 2;
        *split_key = temp_keys[mid];
        Py_INCREF(*split_key);
        
        /* Keep first half in current node */
        node->num_keys = mid;
        for (int i = 0; i < mid; i++) {
            Py_INCREF(temp_keys[i]);
            node_set_key(node, i, temp_keys[i]);
        }
        for (int i = 0; i <= mid; i++) {
            node_set_child(node, i, temp_children[i]);
        }
        
        /* Move second half to new node */
        (*new_node)->num_keys = node->capacity - mid;
        for (int i = 0; i < (*new_node)->num_keys; i++) {
            Py_INCREF(temp_keys[mid + 1 + i]);
            node_set_key(*new_node, i, temp_keys[mid + 1 + i]);
        }
        for (int i = 0; i <= (*new_node)->num_keys; i++) {
            node_set_child(*new_node, i, temp_children[mid + 1 + i]);
        }
        
        /* NOTE(review): key slots >= mid in the old node are neither cleared
         * nor DECREF'd here, while the write-backs above take fresh INCREFs —
         * confirm against node_destroy / num_keys bookkeeping that no
         * references are leaked across a split. */
        
        /* Clean up temps */
        PyMem_Free(temp_keys);
        PyMem_Free(temp_children);
        
        return 1;  /* Split occurred */
    }
    
    /* Normal insert - shift keys and their right-hand children one slot
     * to the right to open a gap at `pos`. */
    for (int i = node->num_keys; i > pos; i--) {
        node_set_key(node, i, node_get_key(node, i - 1));
        node_set_child(node, i + 1, node_get_child(node, i));
    }
    
    /* Insert new key and child */
    Py_INCREF(key);
    node_set_key(node, pos, key);
    node_set_child(node, pos + 1, right_child);
    node->num_keys++;
    
    return 0;  /* No split */
}

================================================
FILE: python/conftest.py
================================================
"""
Pytest configuration for building the C extension before tests.
"""
import sys
import subprocess
from pathlib import Path

here = Path(__file__).parent
subprocess.check_call(
    [sys.executable, "setup.py", "build_ext", "--inplace"], cwd=str(here)
)

# Ensure the C extension built in this directory is importable
sys.path.insert(0, str(here))


================================================
FILE: python/coverage.xml
================================================
<?xml version="1.0" ?>
<coverage version="7.8.2" timestamp="1751690296947" lines-valid="524" lines-covered="381" line-rate="0.7271" branches-valid="176" branches-covered="103" branch-rate="0.5852" complexity="0">
	<!-- Generated by coverage.py: https://coverage.readthedocs.io/en/7.8.2 -->
	<!-- Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd -->
	<sources>
		<source>/Users/kentb/Dropbox/Mac/Documents/augment-projects/BPlusTree3/python/bplustree</source>
	</sources>
	<packages>
		<package name="." line-rate="0.7271" branch-rate="0.5852" complexity="0">
			<classes>
				<class name="__init__.py" filename="__init__.py" complexity="0" line-rate="0.1299" branch-rate="0">
					<methods/>
					<lines>
						<line number="11" hits="1"/>
						<line number="13" hits="1"/>
						<line number="14" hits="1"/>
						<line number="15" hits="1"/>
						<line number="16" hits="1"/>
						<line number="19" hits="0"/>
						<line number="22" hits="0"/>
						<line number="24" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="25,27"/>
						<line number="25" hits="0"/>
						<line number="27" hits="0"/>
						<line number="29" hits="0"/>
						<line number="31" hits="0"/>
						<line number="32" hits="0"/>
						<line number="33" hits="0"/>
						<line number="34" hits="0"/>
						<line number="36" hits="0"/>
						<line number="38" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="exit,39"/>
						<line number="39" hits="0"/>
						<line number="41" hits="0"/>
						<line number="45" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="exit,47"/>
						<line number="47" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="45,48"/>
						<line number="48" hits="0"/>
						<line number="49" hits="0"/>
						<line number="51" hits="0"/>
						<line number="53" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="54,57"/>
						<line number="54" hits="0"/>
						<line number="57" hits="0"/>
						<line number="58" hits="0"/>
						<line number="59" hits="0"/>
						<line number="60" hits="0"/>
						<line number="61" hits="0"/>
						<line number="62" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="63,64"/>
						<line number="63" hits="0"/>
						<line number="64" hits="0"/>
						<line number="66" hits="0"/>
						<line number="68" hits="0"/>
						<line number="70" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="71,75"/>
						<line number="71" hits="0"/>
						<line number="72" hits="0"/>
						<line number="73" hits="0"/>
						<line number="74" hits="0"/>
						<line number="75" hits="0"/>
						<line number="77" hits="0"/>
						<line number="79" hits="0"/>
						<line number="80" hits="0"/>
						<line number="81" hits="0"/>
						<line number="82" hits="0"/>
						<line number="83" hits="0"/>
						<line number="85" hits="0"/>
						<line number="87" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="89,91"/>
						<line number="89" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="exit,90"/>
						<line number="90" hits="0"/>
						<line number="91" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="93,97"/>
						<line number="93" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="exit,94"/>
						<line number="94" hits="0"/>
						<line number="97" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="exit,98"/>
						<line number="98" hits="0"/>
						<line number="100" hits="0"/>
						<line number="102" hits="0"/>
						<line number="103" hits="0" branch="true" condition-coverage="0% (0/2)" missing-branches="104,105"/>
						<line number="104" hits="0"/>
						<line number="105" hits="0"/>
						<line number="107" hits="0"/>
						<line number="108" hits="0"/>
						<line number="110" hits="0"/>
						<line number="112" hits="0"/>
						<line number="113" hits="0"/>
						<line number="115" hits="0"/>
						<line number="117" hits="0"/>
						<line number="118" hits="0"/>
						<line number="120" hits="0"/>
						<line number="122" hits="0"/>
						<line number="125" hits="1"/>
						<line number="127" hits="1"/>
						<line number="128" hits="1"/>
						<line number="131" hits="1"/>
						<line number="133" hits="1"/>
					</lines>
				</class>
				<class name="bplus_tree.py" filename="bplus_tree.py" complexity="0" line-rate="0.83" branch-rate="0.6867">
					<methods/>
					<lines>
						<line number="8" hits="1"/>
						<line number="9" hits="1"/>
						<line number="10" hits="1"/>
						<line number="12" hits="1"/>
						<line number="15" hits="1"/>
						<line number="16" hits="1"/>
						<line number="17" hits="1"/>
						<line number="18" hits="1"/>
						<line number="21" hits="1"/>
						<line number="24" hits="1"/>
						<line number="27" hits="1"/>
						<line number="30" hits="1"/>
						<line number="33" hits="1"/>
						<line number="58" hits="1
Download .txt
gitextract_q6j9thfa/

├── .claude/
│   └── system_prompt_additions.md
├── .devcontainer/
│   └── devcontainer.json
├── .github/
│   └── workflows/
│       ├── build-wheels.yml
│       ├── performance-tracking.yml
│       ├── python-ci.yml
│       ├── release.yml
│       └── rust-ci.yml
├── .gitignore
├── .vscode/
│   └── settings.json
├── Cargo.toml
├── LICENSE
├── README.md
├── agent.md
├── analyze_programming_time.py
├── arena_elimination_analysis.md
├── commits.txt
├── docs/
│   ├── adr/
│   │   └── ADR-003-compressed-node-limitations.md
│   ├── delete_operations_call_graph.md
│   ├── delete_optimization_plan.md
│   └── iteration_optimization_plan.md
├── python/
│   ├── CHANGELOG.md
│   ├── LICENSE
│   ├── MANIFEST.in
│   ├── README.md
│   ├── benchmarks/
│   │   └── performance_benchmark.py
│   ├── bplustree/
│   │   ├── __init__.py
│   │   └── bplus_tree.py
│   ├── bplustree_c_src/
│   │   ├── bplustree.h
│   │   ├── bplustree_module.c
│   │   ├── node_ops.c
│   │   └── tree_ops.c
│   ├── conftest.py
│   ├── coverage.xml
│   ├── docs/
│   │   ├── API_REFERENCE.md
│   │   ├── CAPACITY_OPTIMIZATION_ANALYSIS.md
│   │   ├── COMPETITIVE_ADVANTAGES.md
│   │   ├── C_EXTENSION_IMPROVEMENT_PLAN.md
│   │   ├── C_EXTENSION_SEGFAULT_FIX.md
│   │   ├── GA_READINESS_PLAN.md
│   │   ├── LOOKUP_PERFORMANCE_ANALYSIS.md
│   │   ├── OPTIMIZATION_RESULTS.md
│   │   ├── PERFORMANCE_HISTORY.md
│   │   ├── PERFORMANCE_OPTIMIZATION_PLAN.md
│   │   ├── README_benchmark.md
│   │   ├── STRUCTURAL_IMPROVEMENTS.md
│   │   ├── THREAD_SAFETY.md
│   │   ├── advanced_usage.md
│   │   ├── installation.md
│   │   ├── migration_guide.md
│   │   ├── performance_guide.md
│   │   ├── quickstart.md
│   │   └── troubleshooting.md
│   ├── examples/
│   │   ├── basic_usage.py
│   │   ├── migration_guide.py
│   │   ├── performance_demo.py
│   │   └── range_queries.py
│   ├── py.typed
│   ├── pyproject.toml
│   ├── setup.py
│   ├── tests/
│   │   ├── __init__.py
│   │   ├── _invariant_checker.py
│   │   ├── comprehensive_fuzz_test.py
│   │   ├── fuzz_test.py
│   │   ├── test_bplus_tree.py
│   │   ├── test_c_extension.py
│   │   ├── test_c_extension_comprehensive.py
│   │   ├── test_c_extension_segfault_fix.py
│   │   ├── test_compile_flags.py
│   │   ├── test_data_alignment.py
│   │   ├── test_dictionary_api.py
│   │   ├── test_docstyle.py
│   │   ├── test_fuzz_discovered_patterns.py
│   │   ├── test_gc_support.py
│   │   ├── test_gprof_harness.py
│   │   ├── test_import_error_fallback.py
│   │   ├── test_invariant_bug.py
│   │   ├── test_iterator.py
│   │   ├── test_iterator_modification_safety.py
│   │   ├── test_leak_detection.py
│   │   ├── test_max_occupancy_bug.py
│   │   ├── test_memory_leaks.py
│   │   ├── test_multithreaded_lookup.py
│   │   ├── test_no_segfaults.py
│   │   ├── test_node_split_minimal.py
│   │   ├── test_optimized_bplus_tree.py
│   │   ├── test_performance_baseline.py
│   │   ├── test_performance_benchmarks.py
│   │   ├── test_performance_regression.py
│   │   ├── test_performance_vs_sorteddict.py
│   │   ├── test_prefetch_microbench.py
│   │   ├── test_proper_deletion.py
│   │   ├── test_segfault_regression.py
│   │   ├── test_single_array_int_optimization.py
│   │   ├── test_single_child_parent.py
│   │   ├── test_stress_edge_cases.py
│   │   └── test_stress_large_datasets.py
│   └── tmp/
│       └── xcrun_db
├── rust/
│   ├── API_COMPLETION_ROADMAP.md
│   ├── API_COMPLETION_STATUS.md
│   ├── BTREEMAP_COMPARISON.md
│   ├── BTREE_ADVANTAGES.md
│   ├── Cargo.toml
│   ├── DELETE_PROFILING_REPORT.md
│   ├── ENTRY_API_TRADEOFFS.md
│   ├── HOTSPOT_ANALYSIS.md
│   ├── IMPLEMENTATION_ANALYSIS.md
│   ├── MEMORY_OPTIMIZATION_PLAN.md
│   ├── MEMORY_OPTIMIZATION_RESULTS.md
│   ├── MODULARIZATION_PLAN.md
│   ├── MODULARIZATION_PLAN_REVISED.md
│   ├── PERFORMANCE_ANALYSIS.md
│   ├── PERFORMANCE_LOG.md
│   ├── RANGE_SCAN_PROFILING_REPORT.md
│   ├── README.md
│   ├── RECOMMENDATIONS.md
│   ├── RUNTIME_PERFORMANCE_ANALYSIS.md
│   ├── benches/
│   │   ├── comparison.rs
│   │   ├── profiling_benchmark.rs
│   │   ├── quick_clone_bench.rs
│   │   └── range_scan_profiling.rs
│   ├── docs/
│   │   ├── BENCHMARK_RESULTS.md
│   │   ├── CLAUDE.md
│   │   ├── CODE_DUPLICATION_ANALYSIS.md
│   │   ├── COPY_PASTE_DETECTOR_SUMMARY.md
│   │   ├── FRESH_BENCHMARK_RESULTS_2025.md
│   │   ├── PERFORMANCE_BENCHMARKS.md
│   │   ├── PROJECT_STATUS.md
│   │   ├── RANGE_OPTIMIZATION_SUMMARY.md
│   │   ├── RANGE_QUERY_OPTIMIZATION_PLAN.md
│   │   ├── TEST_RELIABILITY_PLAN.md
│   │   ├── UPDATED_COPY_PASTE_ANALYSIS.md
│   │   ├── arena-allocation-learnings.md
│   │   ├── arena_migration_plan.md
│   │   ├── claude_refactoring.md
│   │   ├── code_coverage_analysis.md
│   │   ├── codex_refactoring.md
│   │   ├── concurrency_locking_strategies.md
│   │   ├── optimal_capacity_analysis.md
│   │   ├── parallel_vectors_vs_entries.md
│   │   └── rust_performance_history.md
│   ├── examples/
│   │   ├── comprehensive_comparison.rs
│   │   ├── find_optimal_capacity.rs
│   │   ├── quick_perf.rs
│   │   ├── range_syntax_demo.rs
│   │   └── readme_examples.rs
│   ├── focused_results/
│   │   └── custom_analysis.rs
│   ├── profiling_results/
│   │   ├── analysis_report.md
│   │   └── timing_analysis.rs
│   ├── src/
│   │   ├── bin/
│   │   │   ├── arena_profile.rs
│   │   │   ├── bound_check_test.rs
│   │   │   ├── delete_profiler.rs
│   │   │   ├── detailed_delete_profiler.rs
│   │   │   ├── function_profiler.rs
│   │   │   ├── instruments_delete_target.rs
│   │   │   ├── large_delete_benchmark.rs
│   │   │   ├── micro_range_bench.rs
│   │   │   ├── profile_functions.rs
│   │   │   ├── range_comparison.rs
│   │   │   └── range_profile.rs
│   │   ├── compact_arena.rs
│   │   ├── comprehensive_performance_benchmark.rs
│   │   ├── construction.rs
│   │   ├── delete_operations.rs
│   │   ├── detailed_iterator_analysis.rs
│   │   ├── error.rs
│   │   ├── get_operations.rs
│   │   ├── insert_operations.rs
│   │   ├── iteration.rs
│   │   ├── lib.rs
│   │   ├── macros.rs
│   │   ├── node.rs
│   │   ├── range_queries.rs
│   │   ├── tree_structure.rs
│   │   ├── types.rs
│   │   └── validation.rs
│   ├── tests/
│   │   ├── adversarial_arena_corruption.rs
│   │   ├── adversarial_branch_rebalancing.rs
│   │   ├── adversarial_edge_cases.rs
│   │   ├── adversarial_linked_list.rs
│   │   ├── bplus_tree.rs
│   │   ├── bug_reproduction_tests.rs
│   │   ├── critical_bug_test.rs
│   │   ├── debug_infinite_loop.rs
│   │   ├── enhanced_error_handling.rs
│   │   ├── error_handling_consistency.rs
│   │   ├── fuzz_tests.rs
│   │   ├── linked_list_corruption_detection.rs
│   │   ├── memory_leak_detection.rs
│   │   ├── memory_safety_audit.rs
│   │   ├── range_bounds_syntax.rs
│   │   ├── range_differential.rs
│   │   ├── remove_operations.rs
│   │   ├── simple_bug_tests.rs
│   │   ├── specific_bug_demos.rs
│   │   └── test_utils.rs
│   └── tools/
│       └── parse_time_profile.py
├── rust-toolchain.toml
├── scripts/
│   ├── analyze_benchmarks.py
│   ├── instruments_export.sh
│   └── precommit.sh
├── simple_time_analysis.py
├── test_coverage_analysis.md
└── visualize_programming_time.py
Download .txt
SYMBOL INDEX (1173 symbols across 111 files)

FILE: analyze_programming_time.py
  function parse_git_log (line 15) | def parse_git_log(log_output):
  function calculate_programming_sessions (line 48) | def calculate_programming_sessions(commits, max_gap_minutes=120):
  function analyze_daily_programming (line 95) | def analyze_daily_programming(sessions):
  function create_visualizations (line 110) | def create_visualizations(sessions, daily_data):
  function print_summary (line 166) | def print_summary(sessions, daily_data):
  function main (line 210) | def main():

FILE: python/benchmarks/performance_benchmark.py
  class BenchmarkSuite (line 24) | class BenchmarkSuite:
    method __init__ (line 27) | def __init__(self, size: int = 10000):
    method time_operation (line 31) | def time_operation(self, name: str, operation):
    method benchmark_sequential_insertion (line 46) | def benchmark_sequential_insertion(self):
    method benchmark_random_insertion (line 57) | def benchmark_random_insertion(self):
    method benchmark_lookups (line 70) | def benchmark_lookups(self, tree: BPlusTreeMap):
    method benchmark_range_queries (line 81) | def benchmark_range_queries(self, tree: BPlusTreeMap):
    method benchmark_iteration (line 96) | def benchmark_iteration(self, tree: BPlusTreeMap):
    method benchmark_deletions (line 104) | def benchmark_deletions(self, tree: BPlusTreeMap):
    method benchmark_dict_comparison (line 115) | def benchmark_dict_comparison(self):
    method run_all_benchmarks (line 152) | def run_all_benchmarks(self):
  function format_results (line 187) | def format_results(results: Dict[str, Any]) -> str:
  function save_results (line 211) | def save_results(results: Dict[str, Any], filename: str = None):
  function main (line 230) | def main():

FILE: python/bplustree/__init__.py
  class BPlusTreeMap (line 19) | class BPlusTreeMap(_c_ext.BPlusTree):
    method __init__ (line 22) | def __init__(self, capacity=None):
    method get (line 29) | def get(self, key, default=None):
    method values (line 36) | def values(self):
    method clear (line 41) | def clear(self):
    method pop (line 51) | def pop(self, key, *args):
    method popitem (line 66) | def popitem(self):
    method setdefault (line 77) | def setdefault(self, key, default=None):
    method update (line 85) | def update(self, other):
    method copy (line 100) | def copy(self):
    method capacity (line 108) | def capacity(self):
    method root (line 113) | def root(self):
    method leaves (line 118) | def leaves(self):
  function get_implementation (line 131) | def get_implementation():

FILE: python/bplustree/bplus_tree.py
  class BPlusTreeError (line 21) | class BPlusTreeError(Exception):
  class InvalidCapacityError (line 27) | class InvalidCapacityError(BPlusTreeError):
  class BPlusTreeMap (line 33) | class BPlusTreeMap:
    method __init__ (line 58) | def __init__(self, capacity: int = DEFAULT_CAPACITY) -> None:
    method from_sorted_items (line 79) | def from_sorted_items(
    method _bulk_load_sorted (line 98) | def _bulk_load_sorted(self, items) -> None:
    method _insert_sorted_optimized (line 114) | def _insert_sorted_optimized(self, key: Any, value: Any) -> None:
    method _update_rightmost_leaf_cache (line 134) | def _update_rightmost_leaf_cache(self) -> None:
    method __setitem__ (line 141) | def __setitem__(self, key: Any, value: Any) -> None:
    method _insert_recursive (line 159) | def _insert_recursive(
    method _insert_into_leaf (line 180) | def _insert_into_leaf(
    method _insert_into_branch (line 199) | def _insert_into_branch(
    method __getitem__ (line 211) | def __getitem__(self, key: Any) -> Any:
    method get (line 221) | def get(self, key: Any, default: Any = None) -> Any:
    method __contains__ (line 238) | def __contains__(self, key: Any) -> bool:
    method __len__ (line 247) | def __len__(self) -> int:
    method __bool__ (line 251) | def __bool__(self) -> bool:
    method __delitem__ (line 255) | def __delitem__(self, key: Any) -> None:
    method _delete_recursive (line 261) | def _delete_recursive(self, node: "Node", key: Any) -> bool:
    method _handle_underflow (line 291) | def _handle_underflow(self, parent: "BranchNode", child_index: int) ->...
    method _redistribute_from_left (line 325) | def _redistribute_from_left(self, parent: "BranchNode", child_index: i...
    method _redistribute_from_right (line 341) | def _redistribute_from_right(self, parent: "BranchNode", child_index: ...
    method _merge_with_sibling (line 357) | def _merge_with_sibling(self, parent: "BranchNode", child_index: int) ...
    method _delete_from_leaf (line 444) | def _delete_from_leaf(self, leaf: "LeafNode", key: Any) -> bool:
    method keys (line 449) | def keys(self, start_key=None, end_key=None) -> Iterator[Any]:
    method values (line 454) | def values(self, start_key=None, end_key=None) -> Iterator[Any]:
    method items (line 459) | def items(self, start_key=None, end_key=None) -> Iterator[Tuple[Any, A...
    method _find_leaf_for_key (line 480) | def _find_leaf_for_key(self, key: Any) -> Optional["LeafNode"]:
    method _find_position_in_leaf (line 484) | def _find_position_in_leaf(self, leaf: "LeafNode", key: Any) -> int:
    method range (line 496) | def range(
    method clear (line 514) | def clear(self) -> None:
    method pop (line 522) | def pop(self, key: Any, *args) -> Any:
    method popitem (line 547) | def popitem(self) -> Tuple[Any, Any]:
    method setdefault (line 569) | def setdefault(self, key: Any, default: Any = None) -> Any:
    method update (line 585) | def update(self, other) -> None:
    method copy (line 604) | def copy(self) -> "BPlusTreeMap":
    method leaf_count (line 617) | def leaf_count(self) -> int:
    method _count_total_nodes (line 626) | def _count_total_nodes(self) -> int:
  class Node (line 640) | class Node(ABC):
    method is_leaf (line 649) | def is_leaf(self) -> bool:
    method is_full (line 654) | def is_full(self) -> bool:
    method __len__ (line 659) | def __len__(self) -> int:
    method is_underfull (line 664) | def is_underfull(self) -> bool:
  class LeafNode (line 669) | class LeafNode(Node):
    method __init__ (line 682) | def __init__(self, capacity: int):
    method is_leaf (line 688) | def is_leaf(self) -> bool:
    method is_full (line 691) | def is_full(self) -> bool:
    method __len__ (line 694) | def __len__(self) -> int:
    method is_underfull (line 697) | def is_underfull(self) -> bool:
    method can_donate (line 702) | def can_donate(self) -> bool:
    method borrow_from_left (line 707) | def borrow_from_left(self, left_sibling: "LeafNode") -> None:
    method borrow_from_right (line 717) | def borrow_from_right(self, right_sibling: "LeafNode") -> None:
    method merge_with_right (line 727) | def merge_with_right(self, right_sibling: "LeafNode") -> None:
    method find_position (line 736) | def find_position(self, key: Any) -> Tuple[int, bool]:
    method insert (line 746) | def insert(self, key: Any, value: Any) -> Optional[Any]:
    method get (line 763) | def get(self, key: Any) -> Optional[Any]:
    method delete (line 770) | def delete(self, key: Any) -> Optional[Any]:
    method split (line 778) | def split(self) -> "LeafNode":
    method split_and_insert (line 800) | def split_and_insert(self, key: Any, value: Any) -> Tuple["LeafNode", ...
    method find_leaf_for_key (line 812) | def find_leaf_for_key(self, _key: Any) -> "LeafNode":
    method key_count (line 816) | def key_count(self) -> int:
  class BranchNode (line 821) | class BranchNode(Node):
    method __init__ (line 838) | def __init__(self, capacity: int):
    method is_leaf (line 843) | def is_leaf(self) -> bool:
    method is_full (line 846) | def is_full(self) -> bool:
    method __len__ (line 849) | def __len__(self) -> int:
    method is_underfull (line 852) | def is_underfull(self) -> bool:
    method can_donate (line 857) | def can_donate(self) -> bool:
    method borrow_from_left (line 862) | def borrow_from_left(self, left_sibling: "BranchNode", separator_key: ...
    method borrow_from_right (line 877) | def borrow_from_right(self, right_sibling: "BranchNode", separator_key...
    method merge_with_right (line 892) | def merge_with_right(self, right_sibling: "BranchNode", separator_key:...
    method find_child_index (line 901) | def find_child_index(self, key: Any) -> int:
    method get_child (line 924) | def get_child(self, key: Any) -> Node:
    method split (line 935) | def split(self) -> "BranchNode":
    method insert_child_and_split_if_needed (line 958) | def insert_child_and_split_if_needed(
    method find_leaf_for_key (line 973) | def find_leaf_for_key(self, key: Any) -> "LeafNode":

FILE: python/bplustree_c_src/bplustree.h
  type NodeType (line 32) | typedef enum {
  type BPlusNode (line 38) | typedef struct BPlusNode BPlusNode;
  type BPlusTree (line 39) | typedef struct BPlusTree BPlusTree;
  type BPlusNode (line 48) | typedef struct BPlusNode {
  type BPlusTree (line 66) | typedef struct BPlusTree {
  function PyObject (line 78) | static inline PyObject* node_get_key(BPlusNode *node, int index) {
  function PyObject (line 82) | static inline PyObject* node_get_value(BPlusNode *node, int index) {
  function BPlusNode (line 86) | static inline BPlusNode* node_get_child(BPlusNode *node, int index) {
  function node_set_key (line 90) | static inline void node_set_key(BPlusNode *node, int index, PyObject *ke...
  function node_set_value (line 94) | static inline void node_set_value(BPlusNode *node, int index, PyObject *...
  function node_set_child (line 98) | static inline void node_set_child(BPlusNode *node, int index, BPlusNode ...
  function BPlusNode (line 103) | static inline BPlusNode *node_prefetch_child(BPlusNode *node, int index) {

FILE: python/bplustree_c_src/bplustree_module.c
  function PyObject (line 22) | PyObject *
  function BPlusTree_init (line 37) | int
  function BPlusTree_dealloc (line 67) | void
  function PyObject (line 77) | PyObject *
  function BPlusTree_setitem (line 83) | int
  function BPlusTree_delitem (line 91) | int
  function Py_ssize_t (line 104) | Py_ssize_t
  function BPlusTree_contains (line 109) | int
  type BPlusTreeIterator (line 123) | typedef struct {
  function BPlusTreeIterator_dealloc (line 132) | static void
  function PyObject (line 138) | static PyObject *
  function PyObject (line 212) | static PyObject *
  function PyObject (line 237) | static PyObject *
  function PyObject (line 242) | static PyObject *
  function PyObject (line 287) | static PyObject *
  function node_gc_op (line 326) | static int
  function node_traverse (line 360) | static int
  function node_clear_gc (line 366) | static int
  function BPlusTree_traverse (line 373) | static int
  function BPlusTree_clear (line 384) | static int
  function PyMODINIT_FUNC (line 434) | PyMODINIT_FUNC

FILE: python/bplustree_c_src/node_ops.c
  function fast_compare_lt (line 17) | int fast_compare_lt(PyObject *a, PyObject *b) {
  function fast_compare_eq (line 43) | int fast_compare_eq(PyObject *a, PyObject *b) {
  function node_find_position (line 68) | int node_find_position(BPlusNode *node, PyObject *key) {
  function BPlusNode (line 92) | BPlusNode* node_create(NodeType type, uint16_t capacity) {
  function node_destroy (line 121) | void node_destroy(BPlusNode *node) {
  function node_clear_slot (line 148) | static void node_clear_slot(BPlusNode *node, int i) {
  function node_insert_leaf (line 172) | int node_insert_leaf(BPlusNode *node, PyObject *key, PyObject *value,
  function node_delete (line 290) | int node_delete(BPlusNode *node, PyObject *key) {
  function PyObject (line 326) | PyObject* node_get(BPlusNode *node, PyObject *key) {
  function cache_aligned_free (line 360) | void cache_aligned_free(void* ptr) {

FILE: python/bplustree_c_src/tree_ops.c
  function BPlusNode (line 11) | BPlusNode* tree_find_leaf(BPlusTree *tree, PyObject *key) {
  function tree_insert_recursive (line 43) | static int tree_insert_recursive(BPlusNode *node, PyObject *key, PyObjec...
  function tree_insert (line 78) | int tree_insert(BPlusTree *tree, PyObject *key, PyObject *value) {
  function tree_delete (line 114) | int tree_delete(BPlusTree *tree, PyObject *key) {
  function PyObject (line 128) | PyObject* tree_get(BPlusTree *tree, PyObject *key) {
  function node_insert_branch (line 135) | int node_insert_branch(BPlusNode *node, PyObject *key, BPlusNode *right_...

FILE: python/examples/basic_usage.py
  function main (line 19) | def main():

FILE: python/examples/migration_guide.py
  function demo_dict_migration (line 19) | def demo_dict_migration():
  function demo_sorteddict_migration (line 67) | def demo_sorteddict_migration():
  function demo_api_compatibility (line 103) | def demo_api_compatibility():
  function demo_performance_benefits (line 154) | def demo_performance_benefits():
  function demo_gotchas_and_tips (line 185) | def demo_gotchas_and_tips():
  function demo_real_world_migration (line 223) | def demo_real_world_migration():
  function main (line 270) | def main():

FILE: python/examples/performance_demo.py
  function benchmark_function (line 32) | def benchmark_function(func, *args, **kwargs):
  function create_test_data (line 40) | def create_test_data(size):
  function benchmark_range_queries (line 45) | def benchmark_range_queries():
  function benchmark_iteration (line 123) | def benchmark_iteration():
  function benchmark_insertion (line 175) | def benchmark_insertion():
  function benchmark_memory_usage (line 221) | def benchmark_memory_usage():
  function demonstrate_early_termination (line 246) | def demonstrate_early_termination():
  function capacity_tuning_demo (line 291) | def capacity_tuning_demo():
  function main (line 323) | def main():

FILE: python/examples/range_queries.py
  function demo_basic_range_queries (line 21) | def demo_basic_range_queries():
  function demo_practical_use_cases (line 67) | def demo_practical_use_cases():
  function demo_pagination_pattern (line 132) | def demo_pagination_pattern():
  function demo_performance_comparison (line 182) | def demo_performance_comparison():
  function main (line 238) | def main():

FILE: python/setup.py
  function get_version (line 15) | def get_version():
  function get_long_description (line 26) | def get_long_description():

FILE: python/tests/_invariant_checker.py
  class BPlusTreeInvariantChecker (line 24) | class BPlusTreeInvariantChecker:
    method __init__ (line 32) | def __init__(self, capacity: int):
    method check_invariants (line 35) | def check_invariants(
    method _check_keys_ascending (line 89) | def _check_keys_ascending(self, node: "Node") -> bool:
    method _check_min_occupancy (line 117) | def _check_min_occupancy(self, node: "Node", is_root: bool = False) ->...
    method _check_max_occupancy (line 143) | def _check_max_occupancy(self, node: "Node") -> bool:
    method _check_branch_structure (line 160) | def _check_branch_structure(self, node: "Node") -> bool:
    method _check_leaf_consistency (line 184) | def _check_leaf_consistency(self, node: "Node") -> bool:
    method _check_leaf_ordering (line 205) | def _check_leaf_ordering(self, leaves_head: "LeafNode") -> bool:
    method _check_uniform_depth (line 222) | def _check_uniform_depth(self, node: "Node") -> bool:
    method _get_leaf_depths (line 236) | def _get_leaf_depths(
    method _find_root (line 257) | def _find_root(self, node: "Node") -> "Node":
    method count_nodes_per_level (line 262) | def count_nodes_per_level(self, node: "Node") -> List[int]:
    method get_tree_stats (line 289) | def get_tree_stats(self, node: "Node") -> dict:
    method _count_total_keys (line 318) | def _count_total_keys(self, node: "Node") -> int:
    method _count_total_nodes (line 330) | def _count_total_nodes(self, node: "Node") -> int:

FILE: python/tests/comprehensive_fuzz_test.py
  function run_capacity_sweep (line 18) | def run_capacity_sweep():
  function run_stress_test (line 152) | def run_stress_test():
  function run_edge_case_tests (line 185) | def run_edge_case_tests():

FILE: python/tests/fuzz_test.py
  function check_invariants (line 27) | def check_invariants(tree: BPlusTreeMap) -> bool:
  class BPlusTreeFuzzTester (line 33) | class BPlusTreeFuzzTester:
    method __init__ (line 36) | def __init__(self, capacity: int = 16, seed: int = None, prepopulate: ...
    method log_operation (line 66) | def log_operation(
    method _prepopulate_tree (line 74) | def _prepopulate_tree(self, count: int) -> None:
    method _calculate_tree_depth (line 122) | def _calculate_tree_depth(self) -> int:
    method verify_consistency (line 134) | def verify_consistency(self) -> bool:
    method _get_all_btree_keys (line 174) | def _get_all_btree_keys(self) -> List[Any]:
    method random_key (line 183) | def random_key(self, existing_bias: float = 0.7) -> Any:
    method random_value (line 190) | def random_value(self) -> str:
    method do_insert_or_update (line 194) | def do_insert_or_update(self):
    method do_delete (line 209) | def do_delete(self):
    method do_get (line 241) | def do_get(self):
    method do_batch_delete (line 269) | def do_batch_delete(self):
    method do_compact (line 312) | def do_compact(self):
    method run_fuzz_test (line 318) | def run_fuzz_test(self, num_operations: int = 1000000) -> bool:
    method _save_failure_info (line 387) | def _save_failure_info(self, failed_at: int):
  function run_quick_fuzz_test (line 473) | def run_quick_fuzz_test():
  function run_full_fuzz_test (line 481) | def run_full_fuzz_test():
  function run_complex_structure_test (line 489) | def run_complex_structure_test():
  function run_varied_capacity_tests (line 505) | def run_varied_capacity_tests():

FILE: python/tests/test_bplus_tree.py
  function check_invariants (line 10) | def check_invariants(tree: BPlusTreeMap) -> bool:
  class TestBasicOperations (line 16) | class TestBasicOperations:
    method test_create_empty_tree (line 19) | def test_create_empty_tree(self):
    method test_insert_and_get_single_item (line 26) | def test_insert_and_get_single_item(self):
    method test_insert_multiple_items (line 37) | def test_insert_multiple_items(self):
    method test_update_existing_key (line 50) | def test_update_existing_key(self):
    method test_contains_operator (line 60) | def test_contains_operator(self):
    method test_get_with_default (line 71) | def test_get_with_default(self):
    method test_key_error_on_missing_key (line 81) | def test_key_error_on_missing_key(self):
  class TestSetItemSplitting (line 92) | class TestSetItemSplitting:
    method test_overflow (line 95) | def test_overflow(self):
    method test_split_then_add (line 114) | def test_split_then_add(self):
    method test_many_insertions_maintain_invariants (line 144) | def test_many_insertions_maintain_invariants(self):
    method test_parent_splitting (line 158) | def test_parent_splitting(self):
  class TestLeafNode (line 186) | class TestLeafNode:
    method test_leaf_node_creation (line 189) | def test_leaf_node_creation(self):
    method test_leaf_node_insert (line 196) | def test_leaf_node_insert(self):
    method test_leaf_node_full (line 220) | def test_leaf_node_full(self):
    method test_leaf_find_position (line 231) | def test_leaf_find_position(self):
  class TestRemoval (line 250) | class TestRemoval:
    method test_remove_single_item_from_leaf_root (line 253) | def test_remove_single_item_from_leaf_root(self):
    method test_remove_multiple_items_from_leaf_root (line 270) | def test_remove_multiple_items_from_leaf_root(self):
    method test_remove_nonexistent_key_raises_error (line 306) | def test_remove_nonexistent_key_raises_error(self):
    method test_remove_from_tree_with_branch_root (line 322) | def test_remove_from_tree_with_branch_root(self):
    method test_remove_multiple_from_tree_with_branches (line 346) | def test_remove_multiple_from_tree_with_branches(self):
    method test_collapse_root_when_empty (line 375) | def test_collapse_root_when_empty(self):
  class TestNodeUnderflow (line 402) | class TestNodeUnderflow:
    method test_leaf_underflow_detection (line 405) | def test_leaf_underflow_detection(self):
    method test_branch_underflow_detection (line 424) | def test_branch_underflow_detection(self):
    method test_underflow_after_deletion_creates_violation (line 443) | def test_underflow_after_deletion_creates_violation(self):
    method test_deletion_can_violate_underflow_invariant (line 465) | def test_deletion_can_violate_underflow_invariant(self):
    method _tree_has_underflow (line 493) | def _tree_has_underflow(self, tree) -> bool:
  class TestBranchNode (line 512) | class TestBranchNode:
    method test_branch_node_creation (line 515) | def test_branch_node_creation(self):
    method test_find_child_index (line 522) | def test_find_child_index(self):
    method test_branch_node_split (line 540) | def test_branch_node_split(self):
  class TestSiblingRedistribution (line 559) | class TestSiblingRedistribution:
    method test_leaf_can_donate (line 562) | def test_leaf_can_donate(self):
    method test_branch_can_donate (line 584) | def test_branch_can_donate(self):
    method test_leaf_borrow_from_left (line 606) | def test_leaf_borrow_from_left(self):
    method test_leaf_borrow_from_right (line 628) | def test_leaf_borrow_from_right(self):
    method test_branch_borrow_from_left (line 650) | def test_branch_borrow_from_left(self):
    method test_branch_borrow_from_right (line 673) | def test_branch_borrow_from_right(self):
    method test_redistribution_during_deletion (line 696) | def test_redistribution_during_deletion(self):
    method test_actual_redistribution_scenario (line 720) | def test_actual_redistribution_scenario(self):
    method test_forced_redistribution_scenario (line 745) | def test_forced_redistribution_scenario(self):
  class TestNodeMerging (line 778) | class TestNodeMerging:
    method test_leaf_merge_with_right (line 781) | def test_leaf_merge_with_right(self):
    method test_branch_merge_with_right (line 805) | def test_branch_merge_with_right(self):
    method test_merging_during_deletion_creates_balanced_tree (line 825) | def test_merging_during_deletion_creates_balanced_tree(self):
    method test_cascade_merging (line 852) | def test_cascade_merging(self):
    method test_merge_vs_redistribute_preference (line 880) | def test_merge_vs_redistribute_preference(self):

FILE: python/tests/test_c_extension.py
  function test_c_extension_basic (line 32) | def test_c_extension_basic():
  function test_c_extension_performance (line 74) | def test_c_extension_performance():
  function test_stress_c_extension (line 174) | def test_stress_c_extension():

FILE: python/tests/test_c_extension_comprehensive.py
  function test_empty_tree (line 20) | def test_empty_tree():
  function test_single_item (line 44) | def test_single_item():
  function test_sequential_insert_small (line 63) | def test_sequential_insert_small():
  function test_random_insert_small (line 97) | def test_random_insert_small():
  function test_duplicate_keys (line 131) | def test_duplicate_keys():
  function test_key_error (line 155) | def test_key_error():
  function test_iteration_order (line 175) | def test_iteration_order():
  function test_large_capacity (line 204) | def test_large_capacity():
  function test_string_keys (line 224) | def test_string_keys():
  function test_mixed_types (line 249) | def test_mixed_types():
  function run_all_tests (line 269) | def run_all_tests():

FILE: python/tests/test_c_extension_segfault_fix.py
  class TestCExtensionSegfaultFix (line 17) | class TestCExtensionSegfaultFix:
    method test_sequential_insertion_no_segfault (line 20) | def test_sequential_insertion_no_segfault(self):
    method test_random_insertion_no_segfault (line 46) | def test_random_insertion_no_segfault(self):
    method test_deletion_after_splits_no_segfault (line 66) | def test_deletion_after_splits_no_segfault(self):
    method test_iteration_after_splits_no_segfault (line 89) | def test_iteration_after_splits_no_segfault(self):
    method test_concurrent_modification_safety (line 110) | def test_concurrent_modification_safety(self):
    method test_memory_stress_test (line 144) | def test_memory_stress_test(self):

FILE: python/tests/test_compile_flags.py
  function test_no_unsafe_compile_flags (line 5) | def test_no_unsafe_compile_flags():

FILE: python/tests/test_data_alignment.py
  function test_data_alignment_default (line 9) | def test_data_alignment_default():
  function test_data_alignment_various_capacities (line 16) | def test_data_alignment_various_capacities():

FILE: python/tests/test_dictionary_api.py
  class TestDictionaryAPI (line 25) | class TestDictionaryAPI:
    method setup_method (line 28) | def setup_method(self):
    method test_clear (line 35) | def test_clear(self):
    method test_get_with_default (line 54) | def test_get_with_default(self):
    method test_pop_with_key_present (line 68) | def test_pop_with_key_present(self):
    method test_pop_with_key_missing_no_default (line 82) | def test_pop_with_key_missing_no_default(self):
    method test_pop_with_key_missing_with_default (line 91) | def test_pop_with_key_missing_with_default(self):
    method test_pop_argument_validation (line 101) | def test_pop_argument_validation(self):
    method test_popitem_with_data (line 107) | def test_popitem_with_data(self):
    method test_popitem_empty_tree (line 122) | def test_popitem_empty_tree(self):
    method test_popitem_until_empty (line 129) | def test_popitem_until_empty(self):
    method test_setdefault_new_key (line 146) | def test_setdefault_new_key(self):
    method test_setdefault_existing_key (line 155) | def test_setdefault_existing_key(self):
    method test_setdefault_none_default (line 164) | def test_setdefault_none_default(self):
    method test_update_with_dict (line 172) | def test_update_with_dict(self):
    method test_update_with_another_bplustree (line 188) | def test_update_with_another_bplustree(self):
    method test_update_with_iterable_of_pairs (line 207) | def test_update_with_iterable_of_pairs(self):
    method test_update_with_generator (line 223) | def test_update_with_generator(self):
    method test_copy (line 238) | def test_copy(self):
    method test_copy_empty_tree (line 263) | def test_copy_empty_tree(self):
    method test_dict_compatibility (line 272) | def test_dict_compatibility(self):
    method test_edge_cases (line 300) | def test_edge_cases(self):
    method test_method_chaining_compatibility (line 322) | def test_method_chaining_compatibility(self):
  class TestDictionaryAPILargeDataset (line 333) | class TestDictionaryAPILargeDataset:
    method test_large_dataset_operations (line 336) | def test_large_dataset_operations(self):

FILE: python/tests/test_docstyle.py
  function test_pydocstyle_conformance (line 8) | def test_pydocstyle_conformance():

FILE: python/tests/test_fuzz_discovered_patterns.py
  function check_invariants (line 19) | def check_invariants(tree: BPlusTreeMap) -> bool:
  class TestFuzzDiscoveredPatterns (line 25) | class TestFuzzDiscoveredPatterns:
    method test_rapid_deletion_followed_by_insertion (line 28) | def test_rapid_deletion_followed_by_insertion(self):
    method test_mixed_operations_stress_pattern (line 108) | def test_mixed_operations_stress_pattern(self):
    method test_high_capacity_rapid_operations (line 180) | def test_high_capacity_rapid_operations(self):
    method test_small_capacity_stress_pattern (line 238) | def test_small_capacity_stress_pattern(self):

FILE: python/tests/test_gc_support.py
  function test_gc_collects_self_referencing_tree (line 10) | def test_gc_collects_self_referencing_tree():

FILE: python/tests/test_gprof_harness.py
  function test_generate_gprof (line 17) | def test_generate_gprof(tmp_path):

FILE: python/tests/test_import_error_fallback.py
  function test_extension_import_error_triggers_python_fallback (line 9) | def test_extension_import_error_triggers_python_fallback(tmp_path, monke...

FILE: python/tests/test_invariant_bug.py
  function check_invariants (line 10) | def check_invariants(tree: BPlusTreeMap) -> bool:
  function test_invariant_checker_catches_single_child (line 16) | def test_invariant_checker_catches_single_child():
  function _print_tree_structure (line 42) | def _print_tree_structure(node, level):

FILE: python/tests/test_iterator.py
  class TestBPlusTreeIterator (line 7) | class TestBPlusTreeIterator:
    method test_iterate_empty_tree (line 10) | def test_iterate_empty_tree(self):
    method test_iterate_single_item (line 16) | def test_iterate_single_item(self):
    method test_iterate_multiple_items_single_leaf (line 24) | def test_iterate_multiple_items_single_leaf(self):
    method test_iterate_multiple_leaves (line 35) | def test_iterate_multiple_leaves(self):
    method test_iterate_large_tree (line 46) | def test_iterate_large_tree(self):
    method test_keys_iterator (line 61) | def test_keys_iterator(self):
    method test_values_iterator (line 70) | def test_values_iterator(self):
  class TestBPlusTreeRangeIterator (line 80) | class TestBPlusTreeRangeIterator:
    method test_iterate_from_key (line 83) | def test_iterate_from_key(self):
    method test_iterate_until_key (line 93) | def test_iterate_until_key(self):
    method test_iterate_range (line 103) | def test_iterate_range(self):
    method test_iterate_from_nonexistent_key (line 113) | def test_iterate_from_nonexistent_key(self):
    method test_iterate_empty_range (line 124) | def test_iterate_empty_range(self):
    method test_iterate_range_beyond_tree (line 134) | def test_iterate_range_beyond_tree(self):
    method test_iterate_from_middle_of_leaf (line 144) | def test_iterate_from_middle_of_leaf(self):

FILE: python/tests/test_iterator_modification_safety.py
  class TestIteratorModificationSafety (line 22) | class TestIteratorModificationSafety:
    method test_iterator_invalidation_on_insertion (line 25) | def test_iterator_invalidation_on_insertion(self):
    method test_iterator_invalidation_on_deletion (line 50) | def test_iterator_invalidation_on_deletion(self):
    method test_iterator_invalidation_on_update (line 75) | def test_iterator_invalidation_on_update(self):
    method test_items_iterator_invalidation (line 100) | def test_items_iterator_invalidation(self):
    method test_multiple_iterators_invalidation (line 125) | def test_multiple_iterators_invalidation(self):
    method test_iterator_after_tree_modification (line 159) | def test_iterator_after_tree_modification(self):
    method test_list_keys_after_heavy_modification (line 188) | def test_list_keys_after_heavy_modification(self):
    method test_iteration_with_structural_changes (line 219) | def test_iteration_with_structural_changes(self):
    method test_concurrent_modification_detection (line 244) | def test_concurrent_modification_detection(self):
    method test_no_false_positives (line 273) | def test_no_false_positives(self):
    method test_modification_counter_wrapping (line 295) | def test_modification_counter_wrapping(self):

FILE: python/tests/test_leak_detection.py
  function test_no_memory_leak_on_insert_delete (line 9) | def test_no_memory_leak_on_insert_delete():

FILE: python/tests/test_max_occupancy_bug.py
  function check_invariants (line 8) | def check_invariants(tree: BPlusTreeMap) -> bool:
  class TestMaxOccupancyBug (line 14) | class TestMaxOccupancyBug:
    method test_small_tree_deletion_pattern (line 17) | def test_small_tree_deletion_pattern(self):
    method test_specific_deletion_sequence (line 43) | def test_specific_deletion_sequence(self):
    method test_root_accumulation (line 77) | def test_root_accumulation(self):
    method test_single_deletion_trigger (line 107) | def test_single_deletion_trigger(self):

FILE: python/tests/test_memory_leaks.py
  class TestMemoryLeaks (line 18) | class TestMemoryLeaks:
    method test_insertion_deletion_cycle_no_leak (line 21) | def test_insertion_deletion_cycle_no_leak(self):
    method test_deleted_values_are_released (line 50) | def test_deleted_values_are_released(self):
    method test_clear_releases_all_references (line 82) | def test_clear_releases_all_references(self):
    method test_tree_destruction_releases_nodes (line 103) | def test_tree_destruction_releases_nodes(self):
    method test_update_operations_no_leak (line 134) | def test_update_operations_no_leak(self):
    method test_copy_creates_independent_references (line 162) | def test_copy_creates_independent_references(self):
    method test_large_tree_memory_usage (line 191) | def test_large_tree_memory_usage(self):
    method test_iterator_cleanup (line 211) | def test_iterator_cleanup(self):
    method test_circular_reference_handling (line 242) | def test_circular_reference_handling(self):

FILE: python/tests/test_multithreaded_lookup.py
  function test_multithreaded_lookup (line 23) | def test_multithreaded_lookup():

FILE: python/tests/test_no_segfaults.py
  class TestNoSegfaults (line 22) | class TestNoSegfaults:
    method test_large_sequential_insert (line 25) | def test_large_sequential_insert(self):
    method test_large_random_insert (line 43) | def test_large_random_insert(self):
    method test_mixed_operations_large (line 68) | def test_mixed_operations_large(self):
    method test_stress_with_iterations (line 109) | def test_stress_with_iterations(self):
    method test_capacity_edge_cases (line 142) | def test_capacity_edge_cases(self):
    method test_boundary_values (line 165) | def test_boundary_values(self):
    method test_memory_pressure (line 191) | def test_memory_pressure(self):
  function test_no_segfaults (line 217) | def test_no_segfaults():

FILE: python/tests/test_node_split_minimal.py
  function test_single_node_split_maintains_order (line 20) | def test_single_node_split_maintains_order():
  function test_two_splits_maintains_order (line 46) | def test_two_splits_maintains_order():

FILE: python/tests/test_optimized_bplus_tree.py
  class OptimizedLeafNode (line 19) | class OptimizedLeafNode:
    method __init__ (line 22) | def __init__(self, capacity: int):
    method is_leaf (line 29) | def is_leaf(self) -> bool:
    method find_position (line 32) | def find_position(self, key) -> int:
    method get_child (line 36) | def get_child(self, key) -> "OptimizedLeafNode":
    method insert (line 40) | def insert(self, key, value) -> Optional[Tuple[Any, "OptimizedLeafNode...
    method _split_and_insert (line 68) | def _split_and_insert(
    method get (line 116) | def get(self, key) -> Optional[Any]:
  class OptimizedBranchNode (line 124) | class OptimizedBranchNode:
    method __init__ (line 127) | def __init__(self, capacity: int):
    method is_leaf (line 133) | def is_leaf(self) -> bool:
    method find_child_index (line 136) | def find_child_index(self, key) -> int:
    method get_child (line 140) | def get_child(self, key):
    method set_child (line 145) | def set_child(self, index: int, child):
    method insert (line 149) | def insert(self, key, right_child) -> Optional[Tuple[Any, "OptimizedBr...
    method _split_and_insert (line 174) | def _split_and_insert(
  class OptimizedBPlusTree (line 228) | class OptimizedBPlusTree:
    method __init__ (line 231) | def __init__(self, capacity: int = 128):
    method __getitem__ (line 236) | def __getitem__(self, key) -> Any:
    method __setitem__ (line 247) | def __setitem__(self, key, value):
    method _insert_recursive (line 258) | def _insert_recursive(self, node, key, value) -> Optional[Tuple]:
    method items (line 269) | def items(self, start_key=None, end_key=None) -> Iterator[Tuple[Any, A...
  function test_optimized_performance (line 295) | def test_optimized_performance():

FILE: python/tests/test_performance_baseline.py
  class PerformanceBaseline (line 18) | class PerformanceBaseline:
    method __init__ (line 21) | def __init__(self, tree_size: int = 10000, order: int = 128):
    method measure_operation (line 28) | def measure_operation(self, operation, iterations: int = 1) -> Tuple[f...
    method test_sequential_insert (line 43) | def test_sequential_insert(self) -> Dict[str, float]:
    method test_random_insert (line 59) | def test_random_insert(self) -> Dict[str, float]:
    method test_lookup_performance (line 75) | def test_lookup_performance(self) -> Dict[str, float]:
    method test_range_query (line 96) | def test_range_query(self) -> Dict[str, float]:
    method run_all_tests (line 120) | def run_all_tests(self) -> Dict[str, Dict[str, float]]:
  function test_baseline_performance (line 131) | def test_baseline_performance():

FILE: python/tests/test_performance_benchmarks.py
  class TestPerformanceBenchmarks (line 21) | class TestPerformanceBenchmarks:
    method test_insertion_performance_small (line 24) | def test_insertion_performance_small(self):
    method test_insertion_performance_medium (line 42) | def test_insertion_performance_medium(self):
    method test_bulk_loading_performance (line 62) | def test_bulk_loading_performance(self):
    method test_lookup_performance (line 88) | def test_lookup_performance(self):
    method test_range_query_performance (line 114) | def test_range_query_performance(self):
    method test_mixed_workload_performance (line 141) | def test_mixed_workload_performance(self):
    method test_capacity_impact_on_performance (line 188) | def test_capacity_impact_on_performance(self):
    method test_memory_efficiency (line 212) | def test_memory_efficiency(self):
    method test_sequential_vs_random_insertion (line 237) | def test_sequential_vs_random_insertion(self):
    method test_large_dataset_scalability (line 272) | def test_large_dataset_scalability(self):
    method test_stress_performance (line 305) | def test_stress_performance(self):
  class TestPerformanceRegression (line 343) | class TestPerformanceRegression:
    method test_baseline_insertion_performance (line 346) | def test_baseline_insertion_performance(self):
    method test_baseline_lookup_performance (line 364) | def test_baseline_lookup_performance(self):
    method test_memory_usage_baseline (line 387) | def test_memory_usage_baseline(self):

FILE: python/tests/test_performance_regression.py
  function time_it (line 19) | def time_it() -> float:
  class TestPerformanceRegression (line 25) | class TestPerformanceRegression:
    method generate_test_data (line 35) | def generate_test_data(self, size: int) -> List[Tuple[int, str]]:
    method test_insertion_performance (line 39) | def test_insertion_performance(self):
    method test_sequential_vs_random_insertion (line 53) | def test_sequential_vs_random_insertion(self):
    method test_lookup_performance (line 80) | def test_lookup_performance(self):
    method test_deletion_performance (line 99) | def test_deletion_performance(self):
    method test_iteration_performance (line 118) | def test_iteration_performance(self):
    method test_range_query_performance (line 137) | def test_range_query_performance(self):
    method test_mixed_operations_performance (line 159) | def test_mixed_operations_performance(self):
    method test_performance_scales_logarithmically (line 188) | def test_performance_scales_logarithmically(self):
    method test_memory_efficiency (line 214) | def test_memory_efficiency(self):
  class TestPerformanceComparison (line 234) | class TestPerformanceComparison:
    method test_insertion_comparable_to_dict (line 237) | def test_insertion_comparable_to_dict(self):
    method test_ordered_iteration_faster_than_sorted_dict (line 263) | def test_ordered_iteration_faster_than_sorted_dict(self):

FILE: python/tests/test_performance_vs_sorteddict.py
  class PerformanceComparison (line 28) | class PerformanceComparison:
    method __init__ (line 31) | def __init__(self, size: int = 10000):
    method measure_operation (line 37) | def measure_operation(self, operation, iterations: int = 1) -> float:
    method compare_lookup (line 51) | def compare_lookup(self) -> Dict[str, float]:
    method compare_insert (line 81) | def compare_insert(self) -> Dict[str, float]:
    method compare_range_query (line 104) | def compare_range_query(self) -> Dict[str, float]:
  function test_performance_comparison (line 142) | def test_performance_comparison():

FILE: python/tests/test_prefetch_microbench.py
  function test_prefetch_microbench (line 30) | def test_prefetch_microbench():

FILE: python/tests/test_proper_deletion.py
  function check_invariants (line 10) | def check_invariants(tree: BPlusTreeMap) -> bool:
  function test_deletion_maintains_invariants (line 16) | def test_deletion_maintains_invariants():
  function test_specific_problematic_case (line 52) | def test_specific_problematic_case():
  function test_merge_vs_redistribute (line 81) | def test_merge_vs_redistribute():
  function _print_structure (line 100) | def _print_structure(node, level):

FILE: python/tests/test_segfault_regression.py
  function test_no_segfault_on_large_operations (line 20) | def test_no_segfault_on_large_operations():
  function test_no_segfault_multiple_trees (line 53) | def test_no_segfault_multiple_trees():
  function test_no_segfault_stress_iterations (line 72) | def test_no_segfault_stress_iterations():

FILE: python/tests/test_single_array_int_optimization.py
  class IntArrayLeafNode (line 16) | class IntArrayLeafNode:
    method __init__ (line 19) | def __init__(self, capacity: int = 128):
    method find_position (line 27) | def find_position(self, key: int) -> int:
    method insert (line 38) | def insert(self, key: int, value: int) -> bool:
    method lookup (line 66) | def lookup(self, key: int) -> int:
  class TwoArrayLeafNode (line 74) | class TwoArrayLeafNode:
    method __init__ (line 77) | def __init__(self, capacity: int = 128):
    method find_position (line 83) | def find_position(self, key: int) -> int:
    method insert (line 94) | def insert(self, key: int, value: int) -> bool:
    method lookup (line 112) | def lookup(self, key: int) -> int:
  function benchmark_int_arrays (line 120) | def benchmark_int_arrays(size: int = 64, iterations: int = 10000):
  function test_single_array_int_optimization (line 269) | def test_single_array_int_optimization():

FILE: python/tests/test_single_child_parent.py
  function test_single_child_parent_handled (line 10) | def test_single_child_parent_handled():

FILE: python/tests/test_stress_edge_cases.py
  function check_invariants (line 13) | def check_invariants(tree: BPlusTreeMap) -> bool:
  class TestStressEdgeCases (line 19) | class TestStressEdgeCases:
    method test_minimum_capacity_heavy_deletion (line 22) | def test_minimum_capacity_heavy_deletion(self):
    method test_alternating_insert_delete_stress (line 49) | def test_alternating_insert_delete_stress(self):
    method test_large_capacity_edge_cases (line 72) | def test_large_capacity_edge_cases(self):
    method test_sequential_vs_random_patterns (line 93) | def test_sequential_vs_random_patterns(self):
    method test_duplicate_key_operations (line 118) | def test_duplicate_key_operations(self):
    method test_empty_tree_operations (line 140) | def test_empty_tree_operations(self):
    method test_capacity_boundary_conditions (line 165) | def test_capacity_boundary_conditions(self):
    method test_deep_tree_stress (line 191) | def test_deep_tree_stress(self):

FILE: python/tests/test_stress_large_datasets.py
  class TestLargeDatasets (line 17) | class TestLargeDatasets:
    method test_one_million_sequential_insertions (line 21) | def test_one_million_sequential_insertions(self):
    method test_one_million_random_insertions (line 48) | def test_one_million_random_insertions(self):
    method test_large_string_keys (line 79) | def test_large_string_keys(self):
    method test_large_value_objects (line 103) | def test_large_value_objects(self):
    method test_stress_mixed_operations (line 130) | def test_stress_mixed_operations(self):
    method test_range_queries_on_large_dataset (line 177) | def test_range_queries_on_large_dataset(self):
    method test_memory_efficiency_at_scale (line 205) | def test_memory_efficiency_at_scale(self):
    method test_persistence_pattern_simulation (line 237) | def test_persistence_pattern_simulation(self):

FILE: rust/benches/comparison.rs
  function bench_sequential_insertion (line 6) | fn bench_sequential_insertion(c: &mut Criterion) {
  function bench_random_insertion (line 33) | fn bench_random_insertion(c: &mut Criterion) {
  function bench_lookup (line 66) | fn bench_lookup(c: &mut Criterion) {
  function bench_iteration (line 110) | fn bench_iteration(c: &mut Criterion) {
  function bench_deletion (line 142) | fn bench_deletion(c: &mut Criterion) {
  function bench_mixed_operations (line 186) | fn bench_mixed_operations(c: &mut Criterion) {
  function bench_capacity_optimization (line 254) | fn bench_capacity_optimization(c: &mut Criterion) {
  function bench_range_queries (line 308) | fn bench_range_queries(c: &mut Criterion) {
  function bench_range_edge_cases (line 357) | fn bench_range_edge_cases(c: &mut Criterion) {

FILE: rust/benches/profiling_benchmark.rs
  function profile_balanced_workload (line 9) | fn profile_balanced_workload(c: &mut Criterion) {
  function profile_individual_operations (line 46) | fn profile_individual_operations(c: &mut Criterion) {
  function profile_tree_operations_breakdown (line 103) | fn profile_tree_operations_breakdown(c: &mut Criterion) {
  function profile_range_operations (line 163) | fn profile_range_operations(c: &mut Criterion) {
  function profile_memory_allocation_patterns (line 188) | fn profile_memory_allocation_patterns(c: &mut Criterion) {
  type Operation (line 223) | enum Operation {
  function generate_balanced_operations (line 229) | fn generate_balanced_operations(count: usize) -> Vec<Operation> {

FILE: rust/benches/quick_clone_bench.rs
  function benchmark_key_operations (line 4) | fn benchmark_key_operations(c: &mut Criterion) {

FILE: rust/benches/range_scan_profiling.rs
  function profile_large_range_scans (line 9) | fn profile_large_range_scans(c: &mut Criterion) {
  function profile_random_range_scans (line 63) | fn profile_random_range_scans(c: &mut Criterion) {
  function profile_range_iteration_patterns (line 117) | fn profile_range_iteration_patterns(c: &mut Criterion) {
  function profile_range_bounds_types (line 176) | fn profile_range_bounds_types(c: &mut Criterion) {
  function profile_very_large_single_scan (line 223) | fn profile_very_large_single_scan(c: &mut Criterion) {

FILE: rust/examples/comprehensive_comparison.rs
  type BenchmarkResult (line 9) | struct BenchmarkResult {
    method new (line 19) | fn new(
    method winner (line 39) | fn winner(&self) -> &str {
    method best_ratio (line 57) | fn best_ratio(&self) -> f64 {
  function run_benchmark (line 70) | fn run_benchmark<F>(_name: &str, iterations: usize, mut f: F) -> std::ti...
  function main (line 86) | fn main() {

FILE: rust/examples/find_optimal_capacity.rs
  constant ITERATIONS (line 5) | const ITERATIONS: usize = 10;
  constant INSERT_COUNT (line 6) | const INSERT_COUNT: usize = 10_000;
  constant LOOKUP_COUNT (line 7) | const LOOKUP_COUNT: usize = 100_000;
  constant ITER_COUNT (line 8) | const ITER_COUNT: usize = 100;
  function benchmark_capacity (line 10) | fn benchmark_capacity(capacity: usize) -> (Duration, Duration, Duration) {
  function benchmark_btreemap (line 54) | fn benchmark_btreemap() -> (Duration, Duration, Duration) {
  function main (line 98) | fn main() {

FILE: rust/examples/quick_perf.rs
  function main (line 5) | fn main() {

FILE: rust/examples/range_syntax_demo.rs
  function main (line 3) | fn main() {

FILE: rust/examples/readme_examples.rs
  function main (line 3) | fn main() {
  function quick_start_example (line 21) | fn quick_start_example() {
  function api_examples (line 42) | fn api_examples() {
  function range_query_examples (line 70) | fn range_query_examples() {
  function time_series_example (line 97) | fn time_series_example() {

FILE: rust/focused_results/custom_analysis.rs
  function main (line 4) | fn main() {
  function analyze_tree_navigation (line 13) | fn analyze_tree_navigation() {
  function analyze_iteration_patterns (line 37) | fn analyze_iteration_patterns() {
  function analyze_memory_access (line 65) | fn analyze_memory_access() {

FILE: rust/profiling_results/timing_analysis.rs
  function main (line 4) | fn main() {

FILE: rust/src/bin/arena_profile.rs
  function main (line 4) | fn main() {
  function test_single_operations (line 23) | fn test_single_operations(tree: &BPlusTreeMap<i32, String>) {
  function test_arena_lookups (line 67) | fn test_arena_lookups(tree: &BPlusTreeMap<i32, String>) {

FILE: rust/src/bin/bound_check_test.rs
  function main (line 4) | fn main() {

FILE: rust/src/bin/delete_profiler.rs
  function main (line 4) | fn main() {
  function profile_sequential_deletes (line 15) | fn profile_sequential_deletes() {
  function profile_pseudo_random_deletes (line 38) | fn profile_pseudo_random_deletes() {
  function profile_mixed_workload_deletes (line 68) | fn profile_mixed_workload_deletes() {
  function profile_rebalancing_heavy_deletes (line 119) | fn profile_rebalancing_heavy_deletes() {

FILE: rust/src/bin/detailed_delete_profiler.rs
  function main (line 4) | fn main() {
  function profile_delete_operations_detailed (line 12) | fn profile_delete_operations_detailed() {
  function profile_tree_size (line 34) | fn profile_tree_size(size: usize) {
  function profile_capacity (line 126) | fn profile_capacity(capacity: usize) {

FILE: rust/src/bin/function_profiler.rs
  type ProfileData (line 5) | struct ProfileData {
    method new (line 13) | fn new() -> Self {
    method record (line 22) | fn record(&mut self, duration: Duration) {
    method avg_time (line 29) | fn avg_time(&self) -> Duration {
  function main (line 38) | fn main() {
  function profile_delete_scenarios (line 46) | fn profile_delete_scenarios() {
  function profile_workload (line 61) | fn profile_workload(workload: Vec<Operation>) {
  type Operation (line 176) | enum Operation {
  function create_sequential_delete_workload (line 182) | fn create_sequential_delete_workload() -> Vec<Operation> {
  function create_random_delete_workload (line 193) | fn create_random_delete_workload() -> Vec<Operation> {
  function create_rebalancing_workload (line 207) | fn create_rebalancing_workload() -> Vec<Operation> {
  function create_mixed_workload (line 219) | fn create_mixed_workload() -> Vec<Operation> {

FILE: rust/src/bin/instruments_delete_target.rs
  function main (line 8) | fn main() {

FILE: rust/src/bin/large_delete_benchmark.rs
  function main (line 8) | fn main() {

FILE: rust/src/bin/micro_range_bench.rs
  function main (line 4) | fn main() {

FILE: rust/src/bin/profile_functions.rs
  function main (line 4) | fn main() {
  function profile_large_tree_operations (line 20) | fn profile_large_tree_operations(tree_size: usize, operations_count: usi...
  type Operation (line 279) | enum Operation {
  function generate_mixed_operations (line 285) | fn generate_mixed_operations(count: usize) -> Vec<Operation> {

FILE: rust/src/bin/range_comparison.rs
  function main (line 5) | fn main() {
  function test_range_sizes (line 51) | fn test_range_sizes(
  function test_range_positions (line 92) | fn test_range_positions(
  function test_startup_vs_iteration (line 140) | fn test_startup_vs_iteration(
  function test_creation_overhead (line 206) | fn test_creation_overhead(

FILE: rust/src/bin/range_profile.rs
  function main (line 4) | fn main() {
  function test_range_sizes (line 31) | fn test_range_sizes(tree: &BPlusTreeMap<i32, String>, tree_size: usize) {
  function test_range_positions (line 56) | fn test_range_positions(tree: &BPlusTreeMap<i32, String>, tree_size: usi...
  function test_range_vs_iteration_overhead (line 87) | fn test_range_vs_iteration_overhead(tree: &BPlusTreeMap<i32, String>, _t...
  function test_iterator_creation_cost (line 121) | fn test_iterator_creation_cost(tree: &BPlusTreeMap<i32, String>, tree_si...

FILE: rust/src/compact_arena.rs
  type NodeId (line 7) | pub type NodeId = u32;
  constant NULL_NODE (line 8) | pub const NULL_NODE: NodeId = u32::MAX;
  type CompactArenaStats (line 12) | pub struct CompactArenaStats {
  type CompactArena (line 23) | pub struct CompactArena<T> {
  function new (line 36) | pub fn new() -> Self {
  function with_capacity (line 46) | pub fn with_capacity(capacity: usize) -> Self {
  function allocate (line 57) | pub fn allocate(&mut self, item: T) -> NodeId {
  function deallocate (line 78) | pub fn deallocate(&mut self, id: NodeId) -> Option<T>
  function deallocate_no_return (line 103) | pub fn deallocate_no_return(&mut self, id: NodeId) -> bool {
  function get (line 123) | pub fn get(&self, id: NodeId) -> Option<&T> {
  function get_mut (line 140) | pub fn get_mut(&mut self, id: NodeId) -> Option<&mut T> {
  function get_unchecked (line 159) | pub unsafe fn get_unchecked(&self, id: NodeId) -> &T {
  function get_unchecked_mut (line 168) | pub unsafe fn get_unchecked_mut(&mut self, id: NodeId) -> &mut T {
  function contains (line 174) | pub fn contains(&self, id: NodeId) -> bool {
  function stats (line 184) | pub fn stats(&self) -> CompactArenaStats {
  function compact (line 213) | pub fn compact(&mut self)
  function len (line 245) | pub fn len(&self) -> usize {
  function is_empty (line 253) | pub fn is_empty(&self) -> bool {
  function capacity (line 258) | pub fn capacity(&self) -> usize {
  function clear (line 263) | pub fn clear(&mut self) {
  function free_count (line 271) | pub fn free_count(&self) -> usize {
  function allocated_count (line 276) | pub fn allocated_count(&self) -> usize {
  function utilization (line 281) | pub fn utilization(&self) -> f64 {
  method default (line 288) | fn default() -> Self {
  function deallocate_with_default (line 296) | pub fn deallocate_with_default(&mut self, id: NodeId) -> Option<T> {
  function allocate_leaf (line 332) | pub fn allocate_leaf(&mut self, leaf: LeafNode<K, V>) -> NodeId {
  function allocate_leaf_with_data (line 339) | pub fn allocate_leaf_with_data(
  function allocate_branch (line 357) | pub fn allocate_branch(&mut self, branch: BranchNode<K, V>) -> NodeId {
  function deallocate_leaf (line 363) | pub fn deallocate_leaf(&mut self, id: NodeId) -> Option<LeafNode<K, V>> {
  function deallocate_branch (line 369) | pub fn deallocate_branch(&mut self, id: NodeId) -> Option<BranchNode<K, ...
  function free_leaf_count (line 378) | pub fn free_leaf_count(&self) -> usize {
  function allocated_leaf_count (line 383) | pub fn allocated_leaf_count(&self) -> usize {
  function leaf_utilization (line 388) | pub fn leaf_utilization(&self) -> f64 {
  function free_branch_count (line 393) | pub fn free_branch_count(&self) -> usize {
  function allocated_branch_count (line 398) | pub fn allocated_branch_count(&self) -> usize {
  function branch_utilization (line 403) | pub fn branch_utilization(&self) -> f64 {
  function leaf_arena_stats (line 408) | pub fn leaf_arena_stats(&self) -> CompactArenaStats {
  function branch_arena_stats (line 413) | pub fn branch_arena_stats(&self) -> CompactArenaStats {
  function set_leaf_next (line 418) | pub fn set_leaf_next(&mut self, id: NodeId, next_id: NodeId) -> bool {
  function get_leaf_unchecked (line 435) | pub unsafe fn get_leaf_unchecked(&self, id: NodeId) -> &LeafNode<K, V> {
  function get_branch_unchecked (line 443) | pub unsafe fn get_branch_unchecked(&self, id: NodeId) -> &BranchNode<K, ...
  function test_compact_arena_basic_operations (line 453) | fn test_compact_arena_basic_operations() {
  function test_compact_arena_with_default (line 475) | fn test_compact_arena_with_default() {
  function test_unsafe_access (line 495) | fn test_unsafe_access() {

FILE: rust/src/comprehensive_performance_benchmark.rs
  function run_comprehensive_benchmark (line 8) | pub fn run_comprehensive_benchmark() {
  function setup_trees (line 37) | fn setup_trees(
  function benchmark_access (line 53) | fn benchmark_access(
  function benchmark_insert (line 98) | fn benchmark_insert(
  function benchmark_delete (line 157) | fn benchmark_delete(
  function benchmark_iterate (line 218) | fn benchmark_iterate(
  function test_comprehensive_benchmark (line 271) | fn test_comprehensive_benchmark() {

FILE: rust/src/construction.rs
  type InitResult (line 13) | pub type InitResult<T> = BTreeResult<T>;
  constant DEFAULT_CAPACITY (line 16) | pub const DEFAULT_CAPACITY: usize = 128;
  function new (line 37) | pub fn new(capacity: usize) -> InitResult<Self> {
  function with_default_capacity (line 69) | pub fn with_default_capacity() -> InitResult<Self> {
  function empty (line 91) | pub fn empty(capacity: usize) -> InitResult<Self> {
  function new (line 124) | pub fn new(capacity: usize) -> Self {
  function with_default_capacity (line 144) | pub fn with_default_capacity() -> Self {
  function with_reserved_capacity (line 165) | pub fn with_reserved_capacity(capacity: usize) -> Self {
  function new (line 190) | pub fn new(capacity: usize) -> Self {
  function with_default_capacity (line 209) | pub fn with_default_capacity() -> Self {
  function with_reserved_capacity (line 230) | pub fn with_reserved_capacity(capacity: usize) -> Self {
  method default (line 242) | fn default() -> Self {
  method default (line 249) | fn default() -> Self {
  method default (line 256) | fn default() -> Self {
  function validate_capacity (line 275) | pub fn validate_capacity(capacity: usize) -> BTreeResult<()> {
  function recommended_capacity (line 296) | pub fn recommended_capacity(expected_elements: usize) -> usize {
  function test_btree_construction (line 314) | fn test_btree_construction() {
  function test_btree_invalid_capacity (line 321) | fn test_btree_invalid_capacity() {
  function test_btree_default (line 328) | fn test_btree_default() {
  function test_btree_empty (line 334) | fn test_btree_empty() {
  function test_leaf_construction (line 342) | fn test_leaf_construction() {
  function test_leaf_with_reserved_capacity (line 349) | fn test_leaf_with_reserved_capacity() {
  function test_branch_construction (line 356) | fn test_branch_construction() {
  function test_validation (line 363) | fn test_validation() {
  function test_recommended_capacity (line 370) | fn test_recommended_capacity() {

FILE: rust/src/delete_operations.rs
  function remove (line 44) | pub fn remove(&mut self, key: &K) -> Option<V> {
  function remove_item (line 61) | pub fn remove_item(&mut self, key: &K) -> ModifyResult<V> {
  function remove_recursive (line 67) | fn remove_recursive(&mut self, node: &NodeRef<K, V>, key: &K) -> RemoveR...
  function collapse_root_if_needed (line 110) | fn collapse_root_if_needed(&mut self) {
  function create_empty_root_leaf (line 160) | fn create_empty_root_leaf(&mut self) {
  function is_node_underfull (line 167) | fn is_node_underfull(&self, node_ref: &NodeRef<K, V>) -> bool {
  function rebalance_child (line 182) | fn rebalance_child(&mut self, parent_id: NodeId, child_index: usize) -> ...
  function test_delete_operations_module_exists (line 256) | fn test_delete_operations_module_exists() {
  function test_optimized_rebalancing_reduces_arena_access (line 266) | fn test_optimized_rebalancing_reduces_arena_access() {
  function test_rebalancing_with_various_sibling_scenarios (line 299) | fn test_rebalancing_with_various_sibling_scenarios() {
  function test_delete_performance_characteristics (line 332) | fn test_delete_performance_characteristics() {
  function rebalance_leaf (line 369) | fn rebalance_leaf(
  function rebalance_branch (line 468) | fn rebalance_branch(
  function merge_with_left_branch (line 553) | fn merge_with_left_branch(&mut self, parent_id: NodeId, child_index: usi...
  function merge_with_right_branch (line 610) | fn merge_with_right_branch(&mut self, parent_id: NodeId, child_index: us...
  function borrow_from_left_branch_with (line 667) | fn borrow_from_left_branch_with(
  function borrow_from_right_branch_with (line 695) | fn borrow_from_right_branch_with(
  function borrow_from_left_leaf_with_ids (line 723) | fn borrow_from_left_leaf_with_ids(
  function borrow_from_right_leaf_with_ids (line 750) | fn borrow_from_right_leaf_with_ids(
  function merge_with_left_leaf_with_ids (line 778) | fn merge_with_left_leaf_with_ids(
  function merge_with_right_leaf_with_ids (line 806) | fn merge_with_right_leaf_with_ids(

FILE: rust/src/detailed_iterator_analysis.rs
  function analyze_iterator_implementation (line 7) | pub fn analyze_iterator_implementation() {
  function analyze_arena_access_pattern (line 33) | fn analyze_arena_access_pattern(bplus: &BPlusTreeMap<usize, usize>, size...
  function compare_iterator_implementations (line 76) | fn compare_iterator_implementations(bplus: &BPlusTreeMap<usize, usize>, ...
  function analyze_next_call_work (line 126) | fn analyze_next_call_work(bplus: &BPlusTreeMap<usize, usize>, _size: usi...
  function compare_with_btreemap (line 164) | fn compare_with_btreemap(bplus: &BPlusTreeMap<usize, usize>, size: usize) {
  function test_detailed_iterator_analysis (line 219) | fn test_detailed_iterator_analysis() {

FILE: rust/src/error.rs
  type BPlusTreeError (line 8) | pub enum BPlusTreeError {
    method invalid_capacity (line 29) | pub fn invalid_capacity(capacity: usize, min_required: usize) -> Self {
    method data_integrity (line 37) | pub fn data_integrity(context: &str, details: &str) -> Self {
    method arena_error (line 42) | pub fn arena_error(operation: &str, details: &str) -> Self {
    method node_error (line 47) | pub fn node_error(node_type: &str, node_id: u32, details: &str) -> Self {
    method corrupted_tree (line 52) | pub fn corrupted_tree(component: &str, details: &str) -> Self {
    method invalid_state (line 57) | pub fn invalid_state(operation: &str, state: &str) -> Self {
    method allocation_error (line 62) | pub fn allocation_error(resource: &str, reason: &str) -> Self {
    method is_capacity_error (line 67) | pub fn is_capacity_error(&self) -> bool {
    method is_arena_error (line 72) | pub fn is_arena_error(&self) -> bool {
    method fmt (line 78) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  type TreeResult (line 95) | pub(crate) type TreeResult<T> = Result<T, BPlusTreeError>;
  type BTreeResult (line 98) | pub type BTreeResult<T> = Result<T, BPlusTreeError>;
  type KeyResult (line 101) | pub type KeyResult<T> = Result<T, BPlusTreeError>;
  type ModifyResult (line 104) | pub type ModifyResult<T> = Result<T, BPlusTreeError>;
  type InitResult (line 107) | pub type InitResult<T> = Result<T, BPlusTreeError>;
  type BTreeResultExt (line 110) | pub trait BTreeResultExt<T> {
    method with_context (line 112) | fn with_context(self, context: &str) -> BTreeResult<T>;
    method with_operation (line 115) | fn with_operation(self, operation: &str) -> BTreeResult<T>;
    method or_default_with_log (line 118) | fn or_default_with_log(self) -> T
  function with_context (line 124) | fn with_context(self, context: &str) -> BTreeResult<T> {
  function with_operation (line 143) | fn with_operation(self, operation: &str) -> BTreeResult<T> {
  function or_default_with_log (line 147) | fn or_default_with_log(self) -> T

FILE: rust/src/get_operations.rs
  function get (line 34) | pub fn get(&self, key: &K) -> Option<&V> {
  function contains_key (line 62) | pub fn contains_key(&self, key: &K) -> bool {
  function get_or_default (line 87) | pub fn get_or_default<'a>(&'a self, key: &K, default: &'a V) -> &'a V {
  function get_item (line 112) | pub fn get_item(&self, key: &K) -> KeyResult<&V> {
  function get_mut (line 138) | pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
  function try_get (line 166) | pub fn try_get(&self, key: &K) -> KeyResult<&V> {
  function get_many (line 194) | pub fn get_many(&self, keys: &[K]) -> BTreeResult<Vec<&V>> {
  function get_child_for_key (line 217) | pub fn get_child_for_key(&self, branch_id: NodeId, key: &K) -> Option<(u...
  function get_leaf (line 233) | pub fn get_leaf(&self, id: NodeId) -> Option<&LeafNode<K, V>> {
  function get_leaf_mut (line 239) | pub fn get_leaf_mut(&mut self, id: NodeId) -> Option<&mut LeafNode<K, V>> {
  function get_leaf_next (line 244) | pub fn get_leaf_next(&self, id: NodeId) -> Option<NodeId> {
  function get_branch (line 256) | pub fn get_branch(&self, id: NodeId) -> Option<&BranchNode<K, V>> {
  function get_branch_mut (line 262) | pub fn get_branch_mut(&mut self, id: NodeId) -> Option<&mut BranchNode<K...
  function test_basic_get_operations (line 277) | fn test_basic_get_operations() {
  function test_get_or_default (line 303) | fn test_get_or_default() {
  function test_get_item (line 312) | fn test_get_item() {
  function test_get_mut (line 325) | fn test_get_mut() {
  function test_get_many (line 340) | fn test_get_many() {
  function test_try_get (line 359) | fn test_try_get() {
  function test_leaf_node_get_operations (line 369) | fn test_leaf_node_get_operations() {
  function test_branch_node_operations (line 395) | fn test_branch_node_operations() {

FILE: rust/src/insert_operations.rs
  function new_root (line 15) | pub fn new_root(&mut self, new_node: NodeRef<K, V>, separator_key: K) ->...
  function insert_into_leaf (line 31) | fn insert_into_leaf(&mut self, leaf_id: NodeId, key: K, value: V) -> Ins...
  function insert_recursive (line 123) | pub fn insert_recursive(
  function insert (line 230) | pub fn insert(&mut self, key: K, value: V) -> Option<V> {
  function test_insert_operations_module_exists (line 292) | fn test_insert_operations_module_exists() {

FILE: rust/src/iteration.rs
  type ItemIterator (line 14) | pub struct ItemIterator<'a, K, V> {
  type FastItemIterator (line 25) | pub struct FastItemIterator<'a, K, V> {
  type KeyIterator (line 34) | pub struct KeyIterator<'a, K, V> {
  type ValueIterator (line 39) | pub struct ValueIterator<'a, K, V> {
  type RangeIterator (line 45) | pub struct RangeIterator<'a, K, V> {
  function items (line 57) | pub fn items(&self) -> ItemIterator<'_, K, V> {
  function items_fast (line 67) | pub fn items_fast(&self) -> FastItemIterator<'_, K, V> {
  function keys (line 72) | pub fn keys(&self) -> KeyIterator<'_, K, V> {
  function values (line 77) | pub fn values(&self) -> ValueIterator<'_, K, V> {
  function items_range (line 84) | pub fn items_range<'a>(
  function new (line 103) | pub fn new(tree: &'a BPlusTreeMap<K, V>) -> Self {
  function new_from_position_with_bounds (line 121) | pub fn new_from_position_with_bounds(
  function try_get_next_item (line 148) | fn try_get_next_item(&mut self, leaf: &'a LeafNode<K, V>) -> Option<(&'a...
  function advance_to_next_leaf_direct (line 199) | fn advance_to_next_leaf_direct(&mut self) -> bool {
  type Item (line 225) | type Item = (&'a K, &'a V);
  method next (line 227) | fn next(&mut self) -> Option<Self::Item> {
  function new (line 259) | pub fn new(tree: &'a BPlusTreeMap<K, V>) -> Self {
  type Item (line 267) | type Item = &'a K;
  method next (line 269) | fn next(&mut self) -> Option<Self::Item> {
  function new (line 279) | pub fn new(tree: &'a BPlusTreeMap<K, V>) -> Self {
  type Item (line 287) | type Item = &'a V;
  method next (line 289) | fn next(&mut self) -> Option<Self::Item> {
  function new_with_skip_owned (line 299) | pub fn new_with_skip_owned(
  type Item (line 343) | type Item = (&'a K, &'a V);
  method next (line 345) | fn next(&mut self) -> Option<Self::Item> {
  function new (line 370) | pub fn new(tree: &'a BPlusTreeMap<K, V>) -> Self {
  type Item (line 388) | type Item = (&'a K, &'a V);
  method next (line 391) | fn next(&mut self) -> Option<Self::Item> {

FILE: rust/src/lib.rs
  function try_insert (line 59) | pub fn try_insert(&mut self, key: K, value: V) -> ModifyResult<Option<V>>
  function try_remove (line 80) | pub fn try_remove(&mut self, key: &K) -> ModifyResult<V> {
  function batch_insert (line 97) | pub fn batch_insert(&mut self, items: Vec<(K, V)>) -> ModifyResult<Vec<O...
  function test_leaf_caching_optimization_proof (line 184) | fn test_leaf_caching_optimization_proof() {
  function test_fast_iterator_also_uses_leaf_caching (line 216) | fn test_fast_iterator_also_uses_leaf_caching() {

FILE: rust/src/macros.rs
  function test_assert_tree_valid_macro (line 235) | fn test_assert_tree_valid_macro() {
  function test_create_test_tree_macro (line 249) | fn test_create_test_tree_macro() {
  function test_attack_pattern_macro (line 269) | fn test_attack_pattern_macro() {
  function test_verify_attack_result_macro (line 284) | fn test_verify_attack_result_macro() {
  function test_stress_test_macro (line 304) | fn test_stress_test_macro() {

FILE: rust/src/node.rs
  function get (line 20) | pub fn get(&self, key: &K) -> Option<&V> {
  function get_mut (line 28) | pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
  function len (line 35) | pub fn len(&self) -> usize {
  function keys (line 40) | pub fn keys(&self) -> &Vec<K> {
  function values (line 45) | pub fn values(&self) -> &Vec<V> {
  function values_mut (line 50) | pub fn values_mut(&mut self) -> &mut Vec<V> {
  function get_key (line 56) | pub fn get_key(&self, index: usize) -> Option<&K> {
  function get_value (line 62) | pub fn get_value(&self, index: usize) -> Option<&V> {
  function get_value_mut (line 68) | pub fn get_value_mut(&mut self, index: usize) -> Option<&mut V> {
  function first_key (line 74) | pub fn first_key(&self) -> Option<&K> {
  function last_key (line 80) | pub fn last_key(&self) -> Option<&K> {
  function keys_is_empty (line 86) | pub fn keys_is_empty(&self) -> bool {
  function keys_len (line 92) | pub fn keys_len(&self) -> usize {
  function values_len (line 98) | pub fn values_len(&self) -> usize {
  function get_key_unchecked (line 146) | pub unsafe fn get_key_unchecked(&self, index: usize) -> &K {
  function get_value_unchecked (line 171) | pub unsafe fn get_value_unchecked(&self, index: usize) -> &V {
  function get_key_value_unchecked (line 198) | pub unsafe fn get_key_value_unchecked(&self, index: usize) -> (&K, &V) {
  function push_key (line 207) | pub fn push_key(&mut self, key: K) {
  function push_value (line 213) | pub fn push_value(&mut self, value: V) {
  function append_keys (line 219) | pub fn append_keys(&mut self, other: &mut Vec<K>) {
  function append_values (line 225) | pub fn append_values(&mut self, other: &mut Vec<V>) {
  function take_keys (line 231) | pub fn take_keys(&mut self) -> Vec<K> {
  function take_values (line 237) | pub fn take_values(&mut self) -> Vec<V> {
  function binary_search_keys (line 243) | pub fn binary_search_keys(&self, key: &K) -> Result<usize, usize>
  function into_keys_values (line 251) | pub fn into_keys_values(self) -> (impl Iterator<Item = K>, impl Iterator...
  function get_key_at (line 256) | pub fn get_key_at(&self, index: usize) -> Option<&K> {
  function get_value_at (line 261) | pub fn get_value_at(&self, index: usize) -> Option<&V> {
  function insert_at (line 266) | pub fn insert_at(&mut self, index: usize, key: K, value: V) {
  function remove_at (line 272) | pub fn remove_at(&mut self, index: usize) -> Option<(K, V)> {
  function pop (line 283) | pub fn pop(&mut self) -> Option<(K, V)> {
  function remove_first (line 292) | pub fn remove_first(&mut self) -> Option<(K, V)> {
  function insert (line 307) | pub fn insert(&mut self, key: K, value: V) -> InsertResult<K, V> {
  function insert_at_index (line 353) | pub fn insert_at_index(&mut self, index: usize, key: K, value: V) {
  function split (line 359) | pub fn split(&mut self) -> LeafNode<K, V> {
  function remove (line 401) | pub fn remove(&mut self, key: &K) -> (Option<V>, bool) {
  function is_empty (line 418) | pub fn is_empty(&self) -> bool {
  function is_full (line 423) | pub fn is_full(&self) -> bool {
  function needs_split (line 429) | pub fn needs_split(&self) -> bool {
  function is_underfull (line 435) | pub fn is_underfull(&self) -> bool {
  function can_donate (line 441) | pub fn can_donate(&self) -> bool {
  function min_keys (line 451) | pub fn min_keys(&self) -> usize {
  function borrow_last (line 462) | pub fn borrow_last(&mut self) -> Option<(K, V)> {
  function borrow_first (line 470) | pub fn borrow_first(&mut self) -> Option<(K, V)> {
  function accept_from_left (line 478) | pub fn accept_from_left(&mut self, key: K, value: V) {
  function accept_from_right (line 484) | pub fn accept_from_right(&mut self, key: K, value: V) {
  function merge_from (line 490) | pub fn merge_from(&mut self, other: &mut LeafNode<K, V>) -> NodeId {
  function extract_all (line 501) | pub fn extract_all(&mut self) -> (Vec<K>, Vec<V>, NodeId) {
  function insert_child_and_split_if_needed (line 522) | pub fn insert_child_and_split_if_needed(
  function split_data (line 548) | pub fn split_data(&mut self) -> (BranchNode<K, V>, K) {
  function is_empty (line 586) | pub fn is_empty(&self) -> bool {
  function is_full (line 591) | pub fn is_full(&self) -> bool {
  function is_underfull (line 597) | pub fn is_underfull(&self) -> bool {
  function can_donate (line 603) | pub fn can_donate(&self) -> bool {
  function min_keys (line 613) | pub fn min_keys(&self) -> usize {
  function find_child_index (line 621) | pub fn find_child_index(&self, key: &K) -> usize {
  function len (line 630) | pub fn len(&self) -> usize {
  function needs_split (line 636) | pub fn needs_split(&self) -> bool {
  function get_child (line 642) | pub fn get_child(&self, key: &K) -> Option<&NodeRef<K, V>> {
  function get_child_mut (line 652) | pub fn get_child_mut(&mut self, key: &K) -> Option<&mut NodeRef<K, V>> {
  function borrow_last (line 665) | pub fn borrow_last(&mut self) -> Option<(K, NodeRef<K, V>)> {
  function borrow_first (line 675) | pub fn borrow_first(&mut self) -> Option<(K, NodeRef<K, V>)> {
  function accept_from_left (line 686) | pub fn accept_from_left(
  function accept_from_right (line 699) | pub fn accept_from_right(
  function merge_from (line 711) | pub fn merge_from(&mut self, separator: K, other: &mut BranchNode<K, V>) {

FILE: rust/src/range_queries.rs
  type RangeAnalysisResult (line 11) | type RangeAnalysisResult<K> = (Option<(NodeId, usize)>, bool, Option<(K,...
  function range (line 49) | pub fn range<R>(&self, range: R) -> RangeIterator<'_, K, V>
  function first (line 58) | pub fn first(&self) -> Option<(&K, &V)> {
  function last (line 63) | pub fn last(&self) -> Option<(&K, &V)> {
  function resolve_range_bounds (line 72) | pub fn resolve_range_bounds<R>(&self, range: R) -> RangeAnalysisResult<K>

FILE: rust/src/tree_structure.rs
  function len (line 15) | pub fn len(&self) -> usize {
  function len_recursive (line 20) | fn len_recursive(&self, node: &NodeRef<K, V>) -> usize {
  function is_empty (line 37) | pub fn is_empty(&self) -> bool {
  function is_leaf_root (line 42) | pub fn is_leaf_root(&self) -> bool {
  function leaf_count (line 47) | pub fn leaf_count(&self) -> usize {
  function leaf_count_recursive (line 52) | fn leaf_count_recursive(&self, node: &NodeRef<K, V>) -> usize {
  function clear (line 69) | pub fn clear(&mut self) {
  function count_nodes_in_tree (line 81) | pub fn count_nodes_in_tree(&self) -> (usize, usize) {
  function count_nodes_recursive (line 91) | fn count_nodes_recursive(&self, node: &NodeRef<K, V>) -> (usize, usize) {
  function get_first_leaf_id (line 120) | pub fn get_first_leaf_id(&self) -> Option<NodeId> {
  function find_leaf_for_key (line 144) | pub(crate) fn find_leaf_for_key(&self, key: &K) -> Option<(NodeId, usize...
  function find_leaf_for_key_with_match (line 180) | pub(crate) fn find_leaf_for_key_with_match(&self, key: &K) -> Option<(No...
  function find_child (line 219) | pub fn find_child(&self, branch_id: NodeId, key: &K) -> Option<(usize, N...
  function find_child_mut (line 227) | pub fn find_child_mut(&mut self, branch_id: NodeId, key: &K) -> Option<(...

FILE: rust/src/types.rs
  constant MIN_CAPACITY (line 14) | pub(crate) const MIN_CAPACITY: usize = 4;
  type NodeId (line 21) | pub type NodeId = u32;
  constant NULL_NODE (line 24) | pub const NULL_NODE: NodeId = u32::MAX;
  constant ROOT_NODE (line 25) | pub const ROOT_NODE: NodeId = 0;
  type BPlusTreeMap (line 76) | pub struct BPlusTreeMap<K, V> {
  type LeafNode (line 91) | pub struct LeafNode<K, V> {
  type BranchNode (line 109) | pub struct BranchNode<K, V> {
  type NodeRef (line 124) | pub enum NodeRef<K, V> {
  method clone (line 130) | fn clone(&self) -> Self {
  function id (line 139) | pub fn id(&self) -> NodeId {
  function is_leaf (line 147) | pub fn is_leaf(&self) -> bool {
  type SplitNodeData (line 153) | pub enum SplitNodeData<K, V> {
  type InsertResult (line 162) | pub enum InsertResult<K, V> {
  type RemoveResult (line 176) | pub enum RemoveResult<V> {

FILE: rust/src/validation.rs
  function check_invariants (line 16) | pub fn check_invariants(&self) -> bool {
  function check_invariants_detailed (line 21) | pub fn check_invariants_detailed(&self) -> Result<(), String> {
  function check_arena_tree_consistency (line 37) | fn check_arena_tree_consistency(&self) -> TreeResult<()> {
  function check_linked_list_invariants (line 74) | fn check_linked_list_invariants(&self) -> Result<(), String> {
  function check_leaf_linked_list_completeness (line 98) | fn check_leaf_linked_list_completeness(&self) -> TreeResult<()> {
  function collect_leaf_ids (line 136) | fn collect_leaf_ids(&self, node: &NodeRef<K, V>, ids: &mut Vec<NodeId>) {
  function check_node_invariants (line 150) | fn check_node_invariants(
  function validate (line 283) | pub fn validate(&self) -> Result<(), String> {
  function slice (line 288) | pub fn slice(&self) -> Vec<(&K, &V)> {
  function leaf_sizes (line 293) | pub fn leaf_sizes(&self) -> Vec<usize> {
  function print_node_chain (line 300) | pub fn print_node_chain(&self) {
  function collect_leaf_sizes (line 306) | fn collect_leaf_sizes(&self, node: &NodeRef<K, V>, sizes: &mut Vec<usize...
  function print_node (line 324) | fn print_node(&self, node: &NodeRef<K, V>, depth: usize) {
  function validate_for_operation (line 365) | pub fn validate_for_operation(&self, operation: &str) -> crate::error::B...

FILE: rust/tests/adversarial_arena_corruption.rs
  function test_arena_id_exhaustion_attack (line 10) | fn test_arena_id_exhaustion_attack() {
  function test_concurrent_arena_access_simulation (line 28) | fn test_concurrent_arena_access_simulation() {
  function test_arena_growth_boundary_attack (line 43) | fn test_arena_growth_boundary_attack() {
  function test_free_list_corruption_attack (line 82) | fn test_free_list_corruption_attack() {
  function test_deep_recursion_arena_explosion (line 142) | fn test_deep_recursion_arena_explosion() {
  function test_force_arena_corruption_panic (line 196) | fn test_force_arena_corruption_panic() {

FILE: rust/tests/adversarial_branch_rebalancing.rs
  function test_cascading_branch_rebalance_attack (line 9) | fn test_cascading_branch_rebalance_attack() {
  function test_branch_borrow_from_underfull_sibling_attack (line 65) | fn test_branch_borrow_from_underfull_sibling_attack() {
  function test_branch_merge_with_maximum_keys_attack (line 105) | fn test_branch_merge_with_maximum_keys_attack() {
  function test_alternating_sibling_operations_attack (line 153) | fn test_alternating_sibling_operations_attack() {
  function test_deep_tree_branch_collapse_attack (line 200) | fn test_deep_tree_branch_collapse_attack() {
  function test_force_branch_rebalance_panic (line 251) | fn test_force_branch_rebalance_panic() {

FILE: rust/tests/adversarial_edge_cases.rs
  function test_root_collapse_infinite_loop_attack (line 8) | fn test_root_collapse_infinite_loop_attack() {
  function test_minimum_capacity_edge_cases_attack (line 35) | fn test_minimum_capacity_edge_cases_attack() {
  function test_odd_capacity_arithmetic_attack (line 75) | fn test_odd_capacity_arithmetic_attack() {
  function test_insert_remove_same_key_attack (line 119) | fn test_insert_remove_same_key_attack() {
  function test_get_mut_corruption_attack (line 150) | fn test_get_mut_corruption_attack() {
  function test_split_merge_thrashing_attack (line 188) | fn test_split_merge_thrashing_attack() {
  function test_extreme_key_values_attack (line 227) | fn test_extreme_key_values_attack() {
  function test_ultimate_adversarial_attack (line 287) | fn test_ultimate_adversarial_attack() {

FILE: rust/tests/adversarial_linked_list.rs
  function test_linked_list_cycle_attack (line 9) | fn test_linked_list_cycle_attack() {
  function test_concurrent_iteration_modification_attack (line 51) | fn test_concurrent_iteration_modification_attack() {
  function test_split_during_iteration_attack (line 100) | fn test_split_during_iteration_attack() {
  function test_range_iterator_boundary_attack (line 141) | fn test_range_iterator_boundary_attack() {
  function test_linked_list_fragmentation_attack (line 211) | fn test_linked_list_fragmentation_attack() {
  function test_iterator_state_corruption_attack (line 266) | fn test_iterator_state_corruption_attack() {
  function test_force_linked_list_corruption (line 320) | fn test_force_linked_list_corruption() {

FILE: rust/tests/bplus_tree.rs
  function test_node_ref_id_and_is_leaf (line 12) | fn test_node_ref_id_and_is_leaf() {
  function test_insert_overwrite_value (line 27) | fn test_insert_overwrite_value() {
  function test_create_empty_tree (line 43) | fn test_create_empty_tree() {
  function test_insert_and_get_single_item (line 51) | fn test_insert_and_get_single_item() {
  function test_insert_multiple_items (line 62) | fn test_insert_multiple_items() {
  function test_update_existing_key (line 76) | fn test_update_existing_key() {
  function test_contains_key (line 88) | fn test_contains_key() {
  function test_get_with_default (line 100) | fn test_get_with_default() {
  function test_overflow (line 118) | fn test_overflow() {
  function test_split_then_add (line 139) | fn test_split_then_add() {
  function test_many_insertions_maintain_invariants (line 169) | fn test_many_insertions_maintain_invariants() {
  function test_parent_splitting (line 185) | fn test_parent_splitting() {
  function test_remove_single_item_from_leaf_root (line 210) | fn test_remove_single_item_from_leaf_root() {
  function test_remove_multiple_items_from_leaf_root (line 228) | fn test_remove_multiple_items_from_leaf_root() {
  function test_remove_nonexistent_key_returns_none (line 268) | fn test_remove_nonexistent_key_returns_none() {
  function test_remove_from_tree_with_branch_root (line 291) | fn test_remove_from_tree_with_branch_root() {
  function test_remove_multiple_from_tree_with_branches (line 316) | fn test_remove_multiple_from_tree_with_branches() {
  function test_insert_through_branch_node (line 421) | fn test_insert_through_branch_node() {
  function test_leaf_split_updates_parent_branch (line 474) | fn test_leaf_split_updates_parent_branch() {
  function test_root_promotion_leaf_to_branch (line 539) | fn test_root_promotion_leaf_to_branch() {
  function test_branch_node_split_creates_new_level (line 631) | fn test_branch_node_split_creates_new_level() {
  function test_comprehensive_insert_scenarios (line 721) | fn test_comprehensive_insert_scenarios() {
  function test_leaf_allocation (line 854) | fn test_leaf_allocation() {
  function test_leaf_linked_list (line 941) | fn test_leaf_linked_list() {
  function test_invalid_capacity_error (line 1064) | fn test_invalid_capacity_error() {
  function test_key_error_on_missing_key (line 1082) | fn test_key_error_on_missing_key() {
  function test_remove_nonexistent_key_raises_error (line 1096) | fn test_remove_nonexistent_key_raises_error() {
  function test_iterate_empty_tree (line 1116) | fn test_iterate_empty_tree() {
  function test_iterate_single_item (line 1123) | fn test_iterate_single_item() {
  function test_iterate_multiple_items_single_leaf (line 1132) | fn test_iterate_multiple_items_single_leaf() {
  function test_iterate_multiple_leaves (line 1152) | fn test_iterate_multiple_leaves() {
  function test_keys_iterator (line 1171) | fn test_keys_iterator() {
  function test_values_iterator (line 1182) | fn test_values_iterator() {
  function test_iterate_from_key (line 1200) | fn test_iterate_from_key() {
  function test_iterate_until_key (line 1217) | fn test_iterate_until_key() {
  function test_iterate_range (line 1234) | fn test_iterate_range() {
  function test_iterate_from_nonexistent_key (line 1251) | fn test_iterate_from_nonexistent_key() {
  function test_iterate_empty_range (line 1266) | fn test_iterate_empty_range() {
  function test_invariants_empty_tree (line 1282) | fn test_invariants_empty_tree() {
  function test_invariants_single_item (line 1288) | fn test_invariants_single_item() {
  function test_invariants_after_split (line 1295) | fn test_invariants_after_split() {
  function test_invariants_after_many_operations (line 1309) | fn test_invariants_after_many_operations() {
  function test_large_capacity_edge_cases (line 1348) | fn test_large_capacity_edge_cases() {
  function test_capacity_boundary_conditions (line 1378) | fn test_capacity_boundary_conditions() {
  function test_sequential_vs_random_patterns (line 1411) | fn test_sequential_vs_random_patterns() {
  function test_deep_tree_insertion (line 1458) | fn test_deep_tree_insertion() {
  function test_branch_node_splitting (line 1482) | fn test_branch_node_splitting() {
  function test_multi_level_splits (line 1506) | fn test_multi_level_splits() {
  function test_large_sequential_insertion (line 1533) | fn test_large_sequential_insertion() {
  function test_reverse_order_insertion (line 1560) | fn test_reverse_order_insertion() {
  function test_delete_until_empty (line 1590) | fn test_delete_until_empty() {
  function test_root_collapse (line 1622) | fn test_root_collapse() {
  function test_alternating_insert_delete (line 1648) | fn test_alternating_insert_delete() {
  function test_delete_from_deep_tree (line 1669) | fn test_delete_from_deep_tree() {
  function test_delete_all_but_one (line 1702) | fn test_delete_all_but_one() {
  function test_massive_insertion_deletion_cycle (line 1736) | fn test_massive_insertion_deletion_cycle() {
  function test_random_deletion_pattern (line 1777) | fn test_random_deletion_pattern() {
  function test_delete_from_minimal_tree (line 1810) | fn test_delete_from_minimal_tree() {
  function test_stress_deletion_with_invariants (line 1835) | fn test_stress_deletion_with_invariants() {
  function test_single_key_operations (line 1864) | fn test_single_key_operations() {
  function test_duplicate_key_handling (line 1888) | fn test_duplicate_key_handling() {
  function test_extreme_capacity_values (line 1908) | fn test_extreme_capacity_values() {
  function test_pathological_deletion_patterns (line 1935) | fn test_pathological_deletion_patterns() {
  function test_clustered_key_patterns (line 1966) | fn test_clustered_key_patterns() {
  function test_interleaved_operations (line 1999) | fn test_interleaved_operations() {
  function test_clear_and_reuse (line 2027) | fn test_clear_and_reuse() {
  function test_range_query_edge_cases (line 2052) | fn test_range_query_edge_cases() {
  function test_range_syntax_support (line 2076) | fn test_range_syntax_support() {
  function test_range_syntax_with_excluded_bounds (line 2117) | fn test_range_syntax_with_excluded_bounds() {
  function test_first_and_last (line 2146) | fn test_first_and_last() {
  function test_get_mut (line 2162) | fn test_get_mut() {
  function test_arena_consistency (line 2180) | fn test_arena_consistency() {
  function test_leaf_linked_list_completeness (line 2209) | fn test_leaf_linked_list_completeness() {
  function test_try_insert_and_remove (line 2226) | fn test_try_insert_and_remove() {
  function test_batch_insert (line 2242) | fn test_batch_insert() {
  function test_get_many (line 2260) | fn test_get_many() {
  function test_validate_for_operation (line 2282) | fn test_validate_for_operation() {

FILE: rust/tests/bug_reproduction_tests.rs
  function test_memory_leak_in_root_creation (line 8) | fn test_memory_leak_in_root_creation() {
  function test_linked_list_corruption_during_merge (line 31) | fn test_linked_list_corruption_during_merge() {
  function test_incorrect_split_logic_odd_capacity (line 69) | fn test_incorrect_split_logic_odd_capacity() {
  function test_root_split_linked_list_race (line 88) | fn test_root_split_linked_list_race() {
  function test_range_iterator_bound_handling (line 110) | fn test_range_iterator_bound_handling() {
  function test_min_keys_calculation_inconsistency (line 142) | fn test_min_keys_calculation_inconsistency() {
  function test_incomplete_rebalancing_logic (line 168) | fn test_incomplete_rebalancing_logic() {
  function test_arena_tree_consistency (line 197) | fn test_arena_tree_consistency() {
  function test_iterator_lifetime_safety (line 221) | fn test_iterator_lifetime_safety() {
  function test_root_collapse_edge_cases (line 235) | fn test_root_collapse_edge_cases() {
  function test_arena_id_collision (line 260) | fn test_arena_id_collision() {
  function test_split_validation_missing (line 282) | fn test_split_validation_missing() {

FILE: rust/tests/critical_bug_test.rs
  function test_linked_list_corruption_causes_data_loss (line 9) | fn test_linked_list_corruption_causes_data_loss() {
  function demonstrate_memory_leak_accumulation (line 66) | fn demonstrate_memory_leak_accumulation() {
  function test_invariants_after_problematic_operations (line 96) | fn test_invariants_after_problematic_operations() {
  function stress_test_arena_consistency (line 136) | fn stress_test_arena_consistency() {

FILE: rust/tests/debug_infinite_loop.rs
  function test_empty_tree_leaf_count (line 8) | fn test_empty_tree_leaf_count() {
  function test_tree_creation_only (line 20) | fn test_tree_creation_only() {
  function test_leaf_sizes (line 27) | fn test_leaf_sizes() {
  function test_single_insertion (line 39) | fn test_single_insertion() {
  function test_split_balance (line 54) | fn test_split_balance() {

FILE: rust/tests/enhanced_error_handling.rs
  function test_enhanced_error_constructors (line 18) | fn test_enhanced_error_constructors() {
  function test_result_type_aliases (line 64) | fn test_result_type_aliases() {
  function test_result_extension_trait (line 99) | fn test_result_extension_trait() {
  function test_get_or_default (line 135) | fn test_get_or_default() {
  function test_try_get (line 153) | fn test_try_get() {
  function test_try_insert_and_try_remove (line 172) | fn test_try_insert_and_try_remove() {
  function test_batch_insert (line 201) | fn test_batch_insert() {
  function test_get_many (line 228) | fn test_get_many() {
  function test_validate_for_operation (line 254) | fn test_validate_for_operation() {
  function test_error_context_propagation (line 271) | fn test_error_context_propagation() {
  function test_integration_with_existing_api (line 294) | fn test_integration_with_existing_api() {
  function test_error_recovery_patterns (line 324) | fn test_error_recovery_patterns() {
  function test_error_handling_performance (line 347) | fn test_error_handling_performance() {
  function test_comprehensive_error_scenario (line 376) | fn test_comprehensive_error_scenario() {

FILE: rust/tests/error_handling_consistency.rs
  function test_public_api_error_consistency (line 11) | fn test_public_api_error_consistency() {
  function test_error_message_formatting (line 67) | fn test_error_message_formatting() {
  function test_edge_case_error_handling (line 108) | fn test_edge_case_error_handling() {
  function test_error_propagation (line 151) | fn test_error_propagation() {
  function test_operation_safety (line 192) | fn test_operation_safety() {
  function test_error_recovery (line 244) | fn test_error_recovery() {
  function test_internal_error_consistency (line 305) | fn test_internal_error_consistency() {

FILE: rust/tests/fuzz_tests.rs
  function fuzz_test_bplustree (line 17) | fn fuzz_test_bplustree() {
  function fuzz_test_with_random_keys (line 151) | fn fuzz_test_with_random_keys() {
  function fuzz_test_with_updates (line 260) | fn fuzz_test_with_updates() {
  function fuzz_test_timed (line 334) | fn fuzz_test_timed() {
  function parse_duration (line 427) | fn parse_duration(s: &str) -> Result<Duration, String> {

FILE: rust/tests/linked_list_corruption_detection.rs
  function test_intensive_linked_list_corruption_detection (line 9) | fn test_intensive_linked_list_corruption_detection() {
  function test_merge_scenarios_linked_list_integrity (line 135) | fn test_merge_scenarios_linked_list_integrity() {
  function test_linked_list_edge_cases (line 227) | fn test_linked_list_edge_cases() {
  function test_linked_list_stress_consistency (line 281) | fn test_linked_list_stress_consistency() {

FILE: rust/tests/memory_leak_detection.rs
  function test_memory_leak_regression_prevention (line 13) | fn test_memory_leak_regression_prevention() {
  function test_root_split_no_memory_accumulation (line 157) | fn test_root_split_no_memory_accumulation() {
  function test_arena_fragmentation_and_reuse (line 193) | fn test_arena_fragmentation_and_reuse() {
  function test_stress_allocation_deallocation_cycles (line 245) | fn test_stress_allocation_deallocation_cycles() {
  function test_edge_case_memory_scenarios (line 287) | fn test_edge_case_memory_scenarios() {

FILE: rust/tests/memory_safety_audit.rs
  function test_arena_bounds_checking (line 11) | fn test_arena_bounds_checking() {
  function test_node_id_capacity_limits (line 54) | fn test_node_id_capacity_limits() {
  function test_arena_iteration_type_safety (line 92) | fn test_arena_iteration_type_safety() {
  function test_integer_overflow_prevention (line 131) | fn test_integer_overflow_prevention() {
  function test_memory_safety_stress (line 172) | fn test_memory_safety_stress() {
  function test_arena_operations_bounds (line 217) | fn test_arena_operations_bounds() {

FILE: rust/tests/range_bounds_syntax.rs
  function test_range_syntax_inclusive (line 4) | fn test_range_syntax_inclusive() {
  function test_range_syntax_exclusive (line 25) | fn test_range_syntax_exclusive() {
  function test_range_syntax_from (line 45) | fn test_range_syntax_from() {
  function test_range_syntax_to (line 57) | fn test_range_syntax_to() {
  function test_range_syntax_to_inclusive (line 69) | fn test_range_syntax_to_inclusive() {
  function test_range_syntax_full (line 81) | fn test_range_syntax_full() {
  function test_range_syntax_empty_ranges (line 93) | fn test_range_syntax_empty_ranges() {
  function test_range_syntax_edge_cases (line 113) | fn test_range_syntax_edge_cases() {
  function test_range_syntax_with_strings (line 133) | fn test_range_syntax_with_strings() {
  function test_range_syntax_single_element (line 156) | fn test_range_syntax_single_element() {
  function test_range_syntax_excluded_start (line 172) | fn test_range_syntax_excluded_start() {

FILE: rust/tests/range_differential.rs
  function populate_maps (line 4) | fn populate_maps(capacity: usize, data: &[i32]) -> (BPlusTreeMap<i32, i3...
  function test_range_differential_basic_boundaries (line 15) | fn test_range_differential_basic_boundaries() {
  function test_range_differential_gaps_and_nonexistent_bounds (line 64) | fn test_range_differential_gaps_and_nonexistent_bounds() {

FILE: rust/tests/remove_operations.rs
  function test_underfull_child_rebalancing_path (line 7) | fn test_underfull_child_rebalancing_path() {
  function test_underfull_leaf_detection (line 80) | fn test_underfull_leaf_detection() {
  function test_underfull_without_root_collapse (line 135) | fn test_underfull_without_root_collapse() {
  function test_demonstrates_need_for_borrowing_and_merging (line 189) | fn test_demonstrates_need_for_borrowing_and_merging() {
  function test_underfull_nodes_violate_invariants (line 246) | fn test_underfull_nodes_violate_invariants() {
  function test_strict_invariant_checking_should_fail (line 279) | fn test_strict_invariant_checking_should_fail() {
  function test_bplustree_remove_existing_key (line 306) | fn test_bplustree_remove_existing_key() {
  function test_bplustree_remove_with_underflow (line 328) | fn test_bplustree_remove_with_underflow() {
  function test_bplustree_remove_last_key_from_tree (line 367) | fn test_bplustree_remove_last_key_from_tree() {
  function test_bplustree_remove_all_keys_from_single_node (line 394) | fn test_bplustree_remove_all_keys_from_single_node() {
  function test_bplustree_remove_from_first_node_causing_empty (line 432) | fn test_bplustree_remove_from_first_node_causing_empty() {
  function test_bplustree_remove_with_root_node_empty_validation (line 463) | fn test_bplustree_remove_with_root_node_empty_validation() {
  function test_remove_nonexistent_key (line 485) | fn test_remove_nonexistent_key() {

FILE: rust/tests/simple_bug_tests.rs
  function test_memory_leak_placeholder (line 6) | fn test_memory_leak_placeholder() {
  function test_odd_capacity_split (line 40) | fn test_odd_capacity_split() {
  function test_linked_list_integrity (line 63) | fn test_linked_list_integrity() {
  function test_range_excluded_bounds (line 104) | fn test_range_excluded_bounds() {
  function test_min_keys_consistency (line 142) | fn test_min_keys_consistency() {
  function test_rebalancing_after_deletions (line 159) | fn test_rebalancing_after_deletions() {
  function test_iterator_consistency (line 193) | fn test_iterator_consistency() {
  function test_arena_utilization (line 211) | fn test_arena_utilization() {

FILE: rust/tests/specific_bug_demos.rs
  function demonstrate_memory_leak_bug (line 8) | fn demonstrate_memory_leak_bug() {
  function demonstrate_incorrect_split_for_odd_capacity (line 39) | fn demonstrate_incorrect_split_for_odd_capacity() {
  function demonstrate_min_keys_inconsistency (line 72) | fn demonstrate_min_keys_inconsistency() {
  function demonstrate_range_iterator_excluded_bound_bug (line 98) | fn demonstrate_range_iterator_excluded_bound_bug() {
  function demonstrate_linked_list_merge_corruption (line 137) | fn demonstrate_linked_list_merge_corruption() {
  function demonstrate_rebalancing_issues (line 181) | fn demonstrate_rebalancing_issues() {
  function demonstrate_arena_tree_consistency_issues (line 224) | fn demonstrate_arena_tree_consistency_issues() {
  function demonstrate_root_collapse_edge_case (line 273) | fn demonstrate_root_collapse_edge_case() {
  function verify_all_bugs_detected (line 314) | fn verify_all_bugs_detected() {

FILE: rust/tests/test_utils.rs
  function create_tree_4 (line 12) | pub fn create_tree_4() -> BPlusTreeMap<i32, String> {
  function create_tree_4_int (line 17) | pub fn create_tree_4_int() -> BPlusTreeMap<i32, i32> {
  function create_tree_5 (line 22) | pub fn create_tree_5() -> BPlusTreeMap<i32, String> {
  function create_tree_6 (line 27) | pub fn create_tree_6() -> BPlusTreeMap<i32, String> {
  function create_tree_capacity (line 32) | pub fn create_tree_capacity(capacity: usize) -> BPlusTreeMap<i32, String> {
  function create_tree_capacity_int (line 37) | pub fn create_tree_capacity_int(capacity: usize) -> BPlusTreeMap<i32, i3...
  function insert_sequential_range (line 49) | pub fn insert_sequential_range(tree: &mut BPlusTreeMap<i32, String>, cou...
  function insert_sequential_range_int (line 56) | pub fn insert_sequential_range_int(tree: &mut BPlusTreeMap<i32, i32>, co...
  function insert_with_multiplier (line 63) | pub fn insert_with_multiplier(tree: &mut BPlusTreeMap<i32, String>, coun...
  function insert_with_multiplier_int (line 71) | pub fn insert_with_multiplier_int(
  function insert_with_offset_multiplier (line 83) | pub fn insert_with_offset_multiplier(
  function insert_with_custom_fn (line 96) | pub fn insert_with_custom_fn<F, G>(
  function insert_range (line 113) | pub fn insert_range(tree: &mut BPlusTreeMap<i32, String>, start: usize, ...
  function insert_range_int (line 120) | pub fn insert_range_int(tree: &mut BPlusTreeMap<i32, i32>, start: usize,...
  function create_tree_4_with_data (line 131) | pub fn create_tree_4_with_data(count: usize) -> BPlusTreeMap<i32, String> {
  function create_tree_4_int_with_data (line 138) | pub fn create_tree_4_int_with_data(count: usize) -> BPlusTreeMap<i32, i3...
  function create_tree_with_data (line 145) | pub fn create_tree_with_data(capacity: usize, count: usize) -> BPlusTree...
  function create_tree_int_with_data (line 152) | pub fn create_tree_int_with_data(capacity: usize, count: usize) -> BPlus...
  function create_tree_4_with_multiplier (line 159) | pub fn create_tree_4_with_multiplier(count: usize, multiplier: i32) -> B...
  function assert_invariants (line 170) | pub fn assert_invariants(tree: &BPlusTreeMap<i32, String>, context: &str) {
  function assert_invariants_int (line 177) | pub fn assert_invariants_int(tree: &BPlusTreeMap<i32, i32>, context: &st...
  function assert_full_validation (line 184) | pub fn assert_full_validation(tree: &BPlusTreeMap<i32, String>, context:...
  function assert_full_validation_int (line 190) | pub fn assert_full_validation_int(tree: &BPlusTreeMap<i32, i32>, context...
  function deletion_range_attack (line 200) | pub fn deletion_range_attack(tree: &mut BPlusTreeMap<i32, String>, start...
  function deletion_range_attack_int (line 207) | pub fn deletion_range_attack_int(tree: &mut BPlusTreeMap<i32, i32>, star...
  function alternating_deletion_attack (line 214) | pub fn alternating_deletion_attack(tree: &mut BPlusTreeMap<i32, String>,...
  function stress_test_cycle (line 221) | pub fn stress_test_cycle<F>(tree: &mut BPlusTreeMap<i32, String>, cycles...
  function arena_exhaustion_attack (line 236) | pub fn arena_exhaustion_attack(tree: &mut BPlusTreeMap<i32, String>, cyc...
  function fragmentation_attack (line 258) | pub fn fragmentation_attack(tree: &mut BPlusTreeMap<i32, String>, base_k...
  function deep_tree_attack (line 276) | pub fn deep_tree_attack(tree: &mut BPlusTreeMap<i32, i32>, capacity: usi...
  function alternating_operations_attack (line 289) | pub fn alternating_operations_attack(tree: &mut BPlusTreeMap<i32, String...
  function verify_ordering (line 312) | pub fn verify_ordering(tree: &BPlusTreeMap<i32, String>) {
  function verify_ordering_int (line 322) | pub fn verify_ordering_int(tree: &BPlusTreeMap<i32, i32>) {
  function verify_item_count (line 332) | pub fn verify_item_count(tree: &BPlusTreeMap<i32, String>, expected: usi...
  function verify_item_count_int (line 343) | pub fn verify_item_count_int(tree: &BPlusTreeMap<i32, i32>, expected: us...
  function create_branch_test_tree (line 358) | pub fn create_branch_test_tree(capacity: usize) -> BPlusTreeMap<i32, Str...
  function setup_concurrent_simulation (line 378) | pub fn setup_concurrent_simulation() -> (Vec<(bool, i32)>, Vec<(bool, i3...
  function execute_interleaved_ops (line 399) | pub fn execute_interleaved_ops(
  function print_tree_stats (line 434) | pub fn print_tree_stats(tree: &BPlusTreeMap<i32, String>, label: &str) {
  function print_tree_stats_int (line 448) | pub fn print_tree_stats_int(tree: &BPlusTreeMap<i32, i32>, label: &str) {
  function create_attack_tree (line 466) | pub fn create_attack_tree(capacity: usize) -> BPlusTreeMap<i32, String> {
  function create_simple_tree (line 471) | pub fn create_simple_tree(capacity: usize) -> BPlusTreeMap<i32, i32> {
  function populate_sequential (line 476) | pub fn populate_sequential(tree: &mut BPlusTreeMap<i32, String>, count: ...
  function populate_sequential_int (line 481) | pub fn populate_sequential_int(tree: &mut BPlusTreeMap<i32, i32>, count:...
  function populate_sequential_int_x10 (line 486) | pub fn populate_sequential_int_x10(tree: &mut BPlusTreeMap<i32, i32>, co...
  function assert_attack_failed (line 493) | pub fn assert_attack_failed(tree: &BPlusTreeMap<i32, String>, context: &...
  function assert_attack_failed_int (line 498) | pub fn assert_attack_failed_int(tree: &BPlusTreeMap<i32, i32>, context: ...
  function test_utilities_basic_functionality (line 507) | fn test_utilities_basic_functionality() {
  function test_stress_cycle_utility (line 517) | fn test_stress_cycle_utility() {
  function test_combined_creation_utilities (line 529) | fn test_combined_creation_utilities() {
  function test_attack_patterns (line 536) | fn test_attack_patterns() {

FILE: rust/tools/parse_time_profile.py
  function main (line 17) | def main(path: str) -> int:

FILE: scripts/analyze_benchmarks.py
  function create_comparison_charts (line 40) | def create_comparison_charts():
  function create_capacity_optimization_chart (line 111) | def create_capacity_optimization_chart():
  function create_performance_ratio_chart (line 169) | def create_performance_ratio_chart():
  function print_summary (line 210) | def print_summary():

FILE: simple_time_analysis.py
  function parse_git_log (line 12) | def parse_git_log():
  function calculate_programming_sessions (line 60) | def calculate_programming_sessions(commits, max_gap_minutes=120):
  function analyze_daily_programming (line 107) | def analyze_daily_programming(sessions):
  function create_ascii_chart (line 122) | def create_ascii_chart(daily_data):
  function print_summary (line 144) | def print_summary(sessions, daily_data):
  function analyze_patterns (line 207) | def analyze_patterns(sessions, daily_data):
  function main (line 259) | def main():

FILE: visualize_programming_time.py
  function parse_git_log (line 15) | def parse_git_log():
  function calculate_programming_sessions (line 61) | def calculate_programming_sessions(commits, max_gap_minutes=120):
  function analyze_daily_programming (line 101) | def analyze_daily_programming(sessions):
  function create_comprehensive_visualization (line 116) | def create_comprehensive_visualization(sessions, daily_data):
  function main (line 351) | def main():
Condensed preview — 203 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,615K chars).
[
  {
    "path": ".claude/system_prompt_additions.md",
    "chars": 6708,
    "preview": "co# System Prompt Additions for Code Quality\n\n## Code Quality Standards\n\nNEVER write production code that contains:\n\n1. "
  },
  {
    "path": ".devcontainer/devcontainer.json",
    "chars": 1588,
    "preview": "// The Dev Container format allows you to configure your environment. At the heart of it\n// is a Docker image or Dockerf"
  },
  {
    "path": ".github/workflows/build-wheels.yml",
    "chars": 787,
    "preview": "name: Build Wheels\n\non:\n  push:\n    tags:\n      - 'v*'\n  pull_request:\n    branches: [ main ]\n  workflow_dispatch:\n\njobs"
  },
  {
    "path": ".github/workflows/performance-tracking.yml",
    "chars": 1213,
    "preview": "name: Performance Tracking\n\non:\n  push:\n    branches: [ main ]\n  schedule:\n    # Run weekly on Sundays at 00:00 UTC\n    "
  },
  {
    "path": ".github/workflows/python-ci.yml",
    "chars": 1503,
    "preview": "name: Python CI\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n    branches: [ main ]\n\njobs:\n  test:\n    runs-on: u"
  },
  {
    "path": ".github/workflows/release.yml",
    "chars": 1767,
    "preview": "name: Release\n\non:\n  push:\n    tags:\n      - 'v*'\n\njobs:\n  publish-rust:\n    runs-on: ubuntu-latest\n    \n    steps:\n    "
  },
  {
    "path": ".github/workflows/rust-ci.yml",
    "chars": 605,
    "preview": "name: Rust CI\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n    branches: [ main ]\n\njobs:\n  test:\n    runs-on: ubu"
  },
  {
    "path": ".gitignore",
    "chars": 1163,
    "preview": "# Generated by Cargo\n# will have compiled files and executables\ndebug/\ntarget/\n\n# These are backup files generated by ru"
  },
  {
    "path": ".vscode/settings.json",
    "chars": 159,
    "preview": "{\n    \"rust-analyzer.cargo.features\": [\"testing\"],\n    \"rust-analyzer.checkOnSave.allFeatures\": false,\n    \"rust-analyze"
  },
  {
    "path": "Cargo.toml",
    "chars": 361,
    "preview": "[workspace]\nmembers = [\"rust\"]\nresolver = \"2\"\n\n[workspace.package]\nversion = \"0.9.0\"\nauthors = [\"Kent Beck <kent@kentbec"
  },
  {
    "path": "LICENSE",
    "chars": 1066,
    "preview": "MIT License\n\nCopyright (c) 2025 Kent Beck\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\n"
  },
  {
    "path": "README.md",
    "chars": 6267,
    "preview": "# BPlusTree\n\nHigh-performance B+ tree implementations for **Rust** and **Python**, designed for efficient range queries "
  },
  {
    "path": "agent.md",
    "chars": 1394,
    "preview": "# Engineering Conventions for BPlusTree3\n\n- No feature flags for internal experiments. We have no external users, so avo"
  },
  {
    "path": "analyze_programming_time.py",
    "chars": 8939,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nAnalyze programming time based on commit patterns.\nCalculate time gaps between commits and vi"
  },
  {
    "path": "arena_elimination_analysis.md",
    "chars": 11414,
    "preview": "# Fundamental Challenges of Eliminating Arena-Based Allocation in Rust B+ Tree Implementations\n\n## Executive Summary\n\nAr"
  },
  {
    "path": "commits.txt",
    "chars": 20237,
    "preview": "2025-05-20 Initial commit\n2025-05-20 test: verify new tree reports empty\n2025-05-21 Merge pull request #1 from KentBeck/"
  },
  {
    "path": "docs/adr/ADR-003-compressed-node-limitations.md",
    "chars": 5110,
    "preview": "# ADR-003: Compressed Node Limitations and Future Directions\n\n## Status\nAccepted\n\n## Context\n\nDuring implementation of c"
  },
  {
    "path": "docs/delete_operations_call_graph.md",
    "chars": 7897,
    "preview": "# Delete Operations Call Graph Analysis\n\n## Overview\n\nThis document provides a comprehensive analysis of the delete oper"
  },
  {
    "path": "docs/delete_optimization_plan.md",
    "chars": 6438,
    "preview": "# Delete Operation Optimization Plan\n\n## Current Performance Analysis\n\nBased on comprehensive benchmarks, delete operati"
  },
  {
    "path": "docs/iteration_optimization_plan.md",
    "chars": 11038,
    "preview": "# Iteration Optimization Plan\n\n## Overview\n\nBased on detailed profiling analysis showing BPlusTreeMap iteration is 2.9x "
  },
  {
    "path": "python/CHANGELOG.md",
    "chars": 2468,
    "preview": "# Changelog\n\nAll notable changes to the B+ Tree Python implementation will be documented in this file.\n\nThe format is ba"
  },
  {
    "path": "python/LICENSE",
    "chars": 1066,
    "preview": "MIT License\n\nCopyright (c) 2025 Kent Beck\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\n"
  },
  {
    "path": "python/MANIFEST.in",
    "chars": 742,
    "preview": "# Include source files for C extension\ninclude bplustree_c_src/*.c\ninclude bplustree_c_src/*.h\n\n# Include documentation\n"
  },
  {
    "path": "python/README.md",
    "chars": 6632,
    "preview": "# BPlusTree - Python Implementation\n\nA high-performance B+ tree implementation for Python with competitive performance a"
  },
  {
    "path": "python/benchmarks/performance_benchmark.py",
    "chars": 8142,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nPerformance benchmark for B+ Tree implementation.\n\nThis script runs standardized benchmarks a"
  },
  {
    "path": "python/bplustree/__init__.py",
    "chars": 4576,
    "preview": "\"\"\"\nB+ Tree mapping implementation with optional C extension.\n\nThis package provides an ordered key-value mapping based "
  },
  {
    "path": "python/bplustree/bplus_tree.py",
    "chars": 35144,
    "preview": "\"\"\"\nB+ Tree implementation in Python with dict-like API.\n\nThis module provides a B+ tree data structure with a dictionar"
  },
  {
    "path": "python/bplustree_c_src/bplustree.h",
    "chars": 5528,
    "preview": "/*\n * B+ Tree C Extension Header\n * \n * Optimized C structures for high-performance B+ tree operations.\n * Uses single a"
  },
  {
    "path": "python/bplustree_c_src/bplustree_module.c",
    "chars": 12455,
    "preview": "/*\n * B+ Tree Python Extension Module\n * \n * Python C API implementation for high-performance B+ tree.\n */\n\n#define PY_S"
  },
  {
    "path": "python/bplustree_c_src/node_ops.c",
    "chars": 10856,
    "preview": "/*\n * B+ Tree Node Operations\n * \n * Core node operations optimized for performance.\n * Uses vectorized search where pos"
  },
  {
    "path": "python/bplustree_c_src/tree_ops.c",
    "chars": 6980,
    "preview": "/*\n * B+ Tree Operations\n * \n * High-level tree operations that coordinate node operations.\n */\n\n#include \"bplustree.h\"\n"
  },
  {
    "path": "python/conftest.py",
    "chars": 349,
    "preview": "\"\"\"\nPytest configuration for building the C extension before tests.\n\"\"\"\nimport sys\nimport subprocess\nfrom pathlib import"
  },
  {
    "path": "python/coverage.xml",
    "chars": 24956,
    "preview": "<?xml version=\"1.0\" ?>\n<coverage version=\"7.8.2\" timestamp=\"1751690296947\" lines-valid=\"524\" lines-covered=\"381\" line-ra"
  },
  {
    "path": "python/docs/API_REFERENCE.md",
    "chars": 9374,
    "preview": "# API Reference\n\nComplete reference for the BPlusTreeMap class and module functions.\n\n## Module Functions\n\n### `get_impl"
  },
  {
    "path": "python/docs/CAPACITY_OPTIMIZATION_ANALYSIS.md",
    "chars": 6770,
    "preview": "# B+ Tree Capacity Optimization Analysis\n\n## Overview\n\nComprehensive analysis of node capacity tradeoffs in B+ tree perf"
  },
  {
    "path": "python/docs/COMPETITIVE_ADVANTAGES.md",
    "chars": 5923,
    "preview": "# B+ Tree Competitive Advantages\n\n## 🏆 Scenarios Where Our B+ Tree Outperforms SortedDict\n\nBased on comprehensive benchm"
  },
  {
    "path": "python/docs/C_EXTENSION_IMPROVEMENT_PLAN.md",
    "chars": 4739,
    "preview": "# C Extension Improvement Plan\n\nA phased roadmap (Red → Green → Refactor, Tidy‑First) to systematically fix correctness,"
  },
  {
    "path": "python/docs/C_EXTENSION_SEGFAULT_FIX.md",
    "chars": 2633,
    "preview": "# C Extension Segfault Fix Documentation\n\n## Issue Summary\n\nThe C extension was experiencing segmentation faults during "
  },
  {
    "path": "python/docs/GA_READINESS_PLAN.md",
    "chars": 14170,
    "preview": "# Python B+ Tree Implementation - GA Readiness Plan\n\n## 🎯 Executive Summary\n\nThis document outlines the roadmap to bring"
  },
  {
    "path": "python/docs/LOOKUP_PERFORMANCE_ANALYSIS.md",
    "chars": 6560,
    "preview": "# B+ Tree Lookup Performance Analysis\n\n## 🔬 Profiler Results Summary\n\nThis document summarizes the findings from profili"
  },
  {
    "path": "python/docs/OPTIMIZATION_RESULTS.md",
    "chars": 7241,
    "preview": "# B+ Tree Performance Optimization Results\n\n## 🎯 Summary of Optimizations Implemented\n\n### Phase 1: Python Implementatio"
  },
  {
    "path": "python/docs/PERFORMANCE_HISTORY.md",
    "chars": 6691,
    "preview": "# B+ Tree Performance Optimization History\n\nThis document tracks the complete performance optimization journey with spec"
  },
  {
    "path": "python/docs/PERFORMANCE_OPTIMIZATION_PLAN.md",
    "chars": 5314,
    "preview": "# B+ Tree Performance Optimization Plan\n\n## Goal\nAchieve performance parity with Python's sortedcontainers.SortedDict wh"
  },
  {
    "path": "python/docs/README_benchmark.md",
    "chars": 4869,
    "preview": "# B+ Tree vs SortedDict Performance Benchmark\n\nThis benchmark utility compares the performance of our B+ Tree implementa"
  },
  {
    "path": "python/docs/STRUCTURAL_IMPROVEMENTS.md",
    "chars": 3850,
    "preview": "# Structural Improvements: Node Helper Methods\n\n## 🎯 **Problem Identified**\nThe tree manipulation code was scattered wit"
  },
  {
    "path": "python/docs/THREAD_SAFETY.md",
    "chars": 6761,
    "preview": "# Thread Safety Analysis - Python B+ Tree Implementation\n\n## Executive Summary\n\nThe Python B+ Tree implementation (`BPlu"
  },
  {
    "path": "python/docs/advanced_usage.md",
    "chars": 18492,
    "preview": "# Advanced Usage Guide\n\n## Capacity Tuning\n\nThe `capacity` parameter is the most important performance tuning knob for B"
  },
  {
    "path": "python/docs/installation.md",
    "chars": 4116,
    "preview": "# Installation Guide\n\n## Requirements\n\n- Python 3.8 or higher\n- C compiler (optional, for C extension)\n- pip package man"
  },
  {
    "path": "python/docs/migration_guide.md",
    "chars": 10524,
    "preview": "# Migration Guide\n\n## Migrating from dict\n\nBPlusTreeMap implements the full dict interface, making migration straightfor"
  },
  {
    "path": "python/docs/performance_guide.md",
    "chars": 9676,
    "preview": "# Performance Guide\n\n## When to Use B+ Tree vs Alternatives\n\n### B+ Tree Strengths\n\nBPlusTreeMap excels in these scenari"
  },
  {
    "path": "python/docs/quickstart.md",
    "chars": 5242,
    "preview": "# Quickstart Guide\n\nGet up and running with BPlusTree in 5 minutes!\n\n## Basic Usage\n\n### Creating a B+ Tree\n\n```python\nf"
  },
  {
    "path": "python/docs/troubleshooting.md",
    "chars": 15337,
    "preview": "# Troubleshooting Guide\n\n## Installation Issues\n\n### C Extension Build Failures\n\n#### Problem: \"Microsoft Visual C++ 14."
  },
  {
    "path": "python/examples/basic_usage.py",
    "chars": 4368,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nBasic usage examples for BPlusTree.\n\nThis example demonstrates the fundamental operations you"
  },
  {
    "path": "python/examples/migration_guide.py",
    "chars": 9648,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nMigration guide for switching from dict/SortedDict to BPlusTree.\n\nThis example shows how to m"
  },
  {
    "path": "python/examples/performance_demo.py",
    "chars": 10658,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nPerformance demonstration comparing BPlusTree vs standard dict and other data structures.\n\nTh"
  },
  {
    "path": "python/examples/range_queries.py",
    "chars": 7698,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nRange query examples for BPlusTree.\n\nThis example demonstrates the B+ Tree's powerful range q"
  },
  {
    "path": "python/py.typed",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "python/pyproject.toml",
    "chars": 4826,
    "preview": "[build-system]\nrequires = [\"setuptools>=64\", \"wheel>=0.37\", \"Cython>=0.29.30\"]\nbuild-backend = \"setuptools.build_meta\"\n\n"
  },
  {
    "path": "python/setup.py",
    "chars": 4633,
    "preview": "\"\"\"\nSetup script for B+ Tree package with C extension.\n\nThis setup.py works with pyproject.toml for modern Python packag"
  },
  {
    "path": "python/tests/__init__.py",
    "chars": 26,
    "preview": "\"\"\"B+ Tree test suite.\"\"\"\n"
  },
  {
    "path": "python/tests/_invariant_checker.py",
    "chars": 11592,
    "preview": "\"\"\"\nPrivate invariant checker for B+ Tree validation.\n\nThis module contains the internal validation logic for ensuring B"
  },
  {
    "path": "python/tests/comprehensive_fuzz_test.py",
    "chars": 9800,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nComprehensive fuzz testing with different capacities and initial loads.\nTests the robustness "
  },
  {
    "path": "python/tests/fuzz_test.py",
    "chars": 21211,
    "preview": "\"\"\"\nComprehensive fuzz tester for B+ Tree implementation.\n\nThis tester performs a million random operations and compares"
  },
  {
    "path": "python/tests/test_bplus_tree.py",
    "chars": 29422,
    "preview": "\"\"\"\nTests for B+ Tree implementation\n\"\"\"\n\nimport pytest\nfrom bplustree.bplus_tree import BPlusTreeMap, LeafNode, BranchN"
  },
  {
    "path": "python/tests/test_c_extension.py",
    "chars": 6294,
    "preview": "\"\"\"\nTest the C extension implementation.\nThis verifies that the C extension works correctly and measures its performance"
  },
  {
    "path": "python/tests/test_c_extension_comprehensive.py",
    "chars": 9141,
    "preview": "\"\"\"\nComprehensive test suite for C extension to identify and fix all bugs.\n\"\"\"\n\nimport sys\nimport os\nimport random\n\nsys."
  },
  {
    "path": "python/tests/test_c_extension_segfault_fix.py",
    "chars": 6083,
    "preview": "\"\"\"\nTest that the C extension segfault issue has been fixed.\n\nThis test specifically targets the reference counting bug "
  },
  {
    "path": "python/tests/test_compile_flags.py",
    "chars": 320,
    "preview": "import os\nimport pytest\n\n\ndef test_no_unsafe_compile_flags():\n    if os.environ.get(\"BPLUSTREE_C_FAST_MATH\"):\n        py"
  },
  {
    "path": "python/tests/test_data_alignment.py",
    "chars": 614,
    "preview": "import pytest\n\ntry:\n    import bplustree_c\nexcept ImportError as e:\n    pytest.skip(f\"C extension not available: {e}\", a"
  },
  {
    "path": "python/tests/test_dictionary_api.py",
    "chars": 12748,
    "preview": "\"\"\"\nTest the complete dictionary API for BPlusTreeMap.\n\nThis module tests all dictionary-like methods to ensure compatib"
  },
  {
    "path": "python/tests/test_docstyle.py",
    "chars": 569,
    "preview": "import os\nimport sys\nimport subprocess\n\nimport pytest\n\n\ndef test_pydocstyle_conformance():\n    pytest.importorskip(\"pydo"
  },
  {
    "path": "python/tests/test_fuzz_discovered_patterns.py",
    "chars": 10136,
    "preview": "\"\"\"\nTest cases based on patterns discovered by fuzz testing.\n\nThese tests exercise specific operation sequences that wer"
  },
  {
    "path": "python/tests/test_gc_support.py",
    "chars": 704,
    "preview": "import gc\nimport pytest\n\ntry:\n    from bplustree_c import BPlusTree\nexcept ImportError as e:\n    pytest.skip(f\"C extensi"
  },
  {
    "path": "python/tests/test_gprof_harness.py",
    "chars": 1252,
    "preview": "import pytest\n\npytest.skip(\n    \"gprof profiling harness (requires custom build with -pg); see docs for setup\",\n    allo"
  },
  {
    "path": "python/tests/test_import_error_fallback.py",
    "chars": 1118,
    "preview": "import sys\nimport shutil\nimport importlib\nfrom pathlib import Path\n\nimport pytest\n\n\ndef test_extension_import_error_trig"
  },
  {
    "path": "python/tests/test_invariant_bug.py",
    "chars": 1913,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nTest to expose the missing invariant check for minimum children\n\"\"\"\n\nfrom bplustree.bplus_tre"
  },
  {
    "path": "python/tests/test_iterator.py",
    "chars": 5053,
    "preview": "\"\"\"Tests for B+ Tree iterator functionality\"\"\"\n\nimport pytest\nfrom bplustree import BPlusTreeMap\n\n\nclass TestBPlusTreeIt"
  },
  {
    "path": "python/tests/test_iterator_modification_safety.py",
    "chars": 10655,
    "preview": "\"\"\"\nTest for iterator modification safety fix.\n\nThis test verifies that the modification counter prevents segfaults by\np"
  },
  {
    "path": "python/tests/test_leak_detection.py",
    "chars": 1276,
    "preview": "import tracemalloc\nimport gc\n\nimport pytest\n\nfrom bplustree import BPlusTreeMap as BPlusTree\n\n\ndef test_no_memory_leak_o"
  },
  {
    "path": "python/tests/test_max_occupancy_bug.py",
    "chars": 6865,
    "preview": "\"\"\"Detailed tests to reproduce the maximum occupancy bug\"\"\"\n\nimport pytest\nfrom bplustree.bplus_tree import BPlusTreeMap"
  },
  {
    "path": "python/tests/test_memory_leaks.py",
    "chars": 8109,
    "preview": "\"\"\"\nMemory leak detection tests for B+ Tree implementation.\n\nThese tests ensure that the implementation properly manages"
  },
  {
    "path": "python/tests/test_multithreaded_lookup.py",
    "chars": 1583,
    "preview": "import pytest\n\ntry:\n    from bplustree_c import BPlusTree\nexcept ImportError as e:\n    pytest.skip(f\"C extension not ava"
  },
  {
    "path": "python/tests/test_no_segfaults.py",
    "chars": 8407,
    "preview": "\"\"\"\nTest that ensures NO segfaults occur under any circumstances.\nA segfault is always a critical bug that must be fixed"
  },
  {
    "path": "python/tests/test_node_split_minimal.py",
    "chars": 2450,
    "preview": "\"\"\"\nMinimal test for node split bug - smallest possible failing test.\nFollowing TDD: write the smallest test that replic"
  },
  {
    "path": "python/tests/test_optimized_bplus_tree.py",
    "chars": 12051,
    "preview": "\"\"\"\nTest optimized B+ tree implementation with single array nodes.\nThis creates a modified B+ tree that uses the single "
  },
  {
    "path": "python/tests/test_performance_baseline.py",
    "chars": 5275,
    "preview": "\"\"\"\nTest to establish baseline performance metrics before optimization.\nThis will measure the current implementation and"
  },
  {
    "path": "python/tests/test_performance_benchmarks.py",
    "chars": 14936,
    "preview": "\"\"\"\nPerformance benchmark tests for B+ Tree implementation.\n\nThese tests verify that performance meets expected threshol"
  },
  {
    "path": "python/tests/test_performance_regression.py",
    "chars": 9772,
    "preview": "\"\"\"\nPerformance regression tests for B+ Tree implementation.\n\nThese tests ensure that performance characteristics remain"
  },
  {
    "path": "python/tests/test_performance_vs_sorteddict.py",
    "chars": 5537,
    "preview": "\"\"\"\nCompare B+ Tree performance against sortedcontainers.SortedDict.\nThis test will show the performance gap we need to "
  },
  {
    "path": "python/tests/test_prefetch_microbench.py",
    "chars": 1519,
    "preview": "import pytest\n\npytest.skip(\n    \"Prefetch microbenchmark harness (requires rebuild with -DPREFETCH_HINTS); see docstring"
  },
  {
    "path": "python/tests/test_proper_deletion.py",
    "chars": 3871,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nTest proper deletion logic that maintains invariants throughout\n\"\"\"\n\nfrom bplustree import BP"
  },
  {
    "path": "python/tests/test_segfault_regression.py",
    "chars": 2549,
    "preview": "\"\"\"\nRegression test for segfault bug.\nFollowing TDD: write a failing test that replicates the problem, then fix it.\n\"\"\"\n"
  },
  {
    "path": "python/tests/test_single_array_int_optimization.py",
    "chars": 9040,
    "preview": "\"\"\"\nTest single array optimization with integer keys/values only.\nThis minimizes Python object overhead to better measur"
  },
  {
    "path": "python/tests/test_single_child_parent.py",
    "chars": 779,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nSimple test for the single-child parent edge case\n\"\"\"\n\nimport pytest\nfrom bplustree import BP"
  },
  {
    "path": "python/tests/test_stress_edge_cases.py",
    "chars": 9006,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nStress tests for B+ tree edge cases based on fuzz testing patterns.\nThese tests target specif"
  },
  {
    "path": "python/tests/test_stress_large_datasets.py",
    "chars": 8540,
    "preview": "\"\"\"\nStress tests with large datasets for B+ Tree implementation.\n\nThese tests ensure the implementation can handle large"
  },
  {
    "path": "rust/API_COMPLETION_ROADMAP.md",
    "chars": 4010,
    "preview": "# Missing BPlusTreeMap Functions - Implementation Roadmap\n\n## Critical Missing Functions (Must Implement)\n\n### 1. Entry "
  },
  {
    "path": "rust/API_COMPLETION_STATUS.md",
    "chars": 4523,
    "preview": "# BPlusTreeMap API Completion Status\n\n## Current Implementation Status\n\n### ✅ Implemented Core Functions\n\n**Construction"
  },
  {
    "path": "rust/BTREEMAP_COMPARISON.md",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "rust/BTREE_ADVANTAGES.md",
    "chars": 3926,
    "preview": "# When BTreeMap Outperforms BPlusTreeMap\n\nBased on comprehensive benchmarking and analysis, here are the specific scenar"
  },
  {
    "path": "rust/Cargo.toml",
    "chars": 693,
    "preview": "[package]\nname = \"bplustree\"\nversion.workspace = true\nedition.workspace = true\nauthors.workspace = true\ndescription = \"A"
  },
  {
    "path": "rust/DELETE_PROFILING_REPORT.md",
    "chars": 4969,
    "preview": "# Delete Operation Profiling Report\n\n## Executive Summary\n\nBased on comprehensive profiling of the B+ tree delete operat"
  },
  {
    "path": "rust/ENTRY_API_TRADEOFFS.md",
    "chars": 7465,
    "preview": "# Entry API Implementation: Vec<K> + Vec<V> vs Vec<(K, V)> Tradeoffs\n\n## Current Structure: Separate Vectors\n```rust\npub"
  },
  {
    "path": "rust/HOTSPOT_ANALYSIS.md",
    "chars": 5087,
    "preview": "# Delete Operation Hotspot Analysis\n\n## Summary\n\nLine & function level profiling of the B+ tree delete operation has ide"
  },
  {
    "path": "rust/IMPLEMENTATION_ANALYSIS.md",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "rust/MEMORY_OPTIMIZATION_PLAN.md",
    "chars": 8005,
    "preview": "# Memory Optimization Plan for BPlusTreeMap\n\nBased on detailed analysis, this document outlines a comprehensive plan to "
  },
  {
    "path": "rust/MEMORY_OPTIMIZATION_RESULTS.md",
    "chars": 6664,
    "preview": "# Memory Optimization Results\n\nThis document summarizes the results of implementing Phase 1 memory optimizations for BPl"
  },
  {
    "path": "rust/MODULARIZATION_PLAN.md",
    "chars": 11480,
    "preview": "# BPlusTreeMap Modularization Plan\n\n## Overview\n\nThe current `lib.rs` is 3,138 lines and contains multiple concerns mixe"
  },
  {
    "path": "rust/MODULARIZATION_PLAN_REVISED.md",
    "chars": 20350,
    "preview": "# BPlusTreeMap Modularization Plan (Operation-Based) - UPDATED STATUS\n\n## Overview\n\nThe current `lib.rs` is now 1,732 li"
  },
  {
    "path": "rust/PERFORMANCE_ANALYSIS.md",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "rust/PERFORMANCE_LOG.md",
    "chars": 20660,
    "preview": "# B+ Tree Performance Optimization Log\n\n## Baseline Performance (Before Clone Optimization)\n\n### Test Configuration\n- **"
  },
  {
    "path": "rust/RANGE_SCAN_PROFILING_REPORT.md",
    "chars": 6799,
    "preview": "# Rust BPlusTreeMap Range Scan Profiling Report\n\n## Executive Summary\n\nThis report analyzes the performance characterist"
  },
  {
    "path": "rust/README.md",
    "chars": 3415,
    "preview": "# BPlusTree - Rust Implementation\n\nA high-performance B+ tree implementation in Rust with a dictionary-like API, optimiz"
  },
  {
    "path": "rust/RECOMMENDATIONS.md",
    "chars": 5721,
    "preview": "# Data Structure Selection Guide: BTreeMap vs BPlusTreeMap\n\nThis guide provides objective, data-driven recommendations f"
  },
  {
    "path": "rust/RUNTIME_PERFORMANCE_ANALYSIS.md",
    "chars": 8079,
    "preview": "# Runtime Performance Impact Analysis\n\nThis document provides a comprehensive analysis of the runtime performance impact"
  },
  {
    "path": "rust/benches/comparison.rs",
    "chars": 13992,
    "preview": "use bplustree::BPlusTreeMap;\nuse criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};\nuse ra"
  },
  {
    "path": "rust/benches/profiling_benchmark.rs",
    "chars": 7937,
    "preview": "use bplustree::BPlusTreeMap;\nuse criterion::{black_box, criterion_group, criterion_main, Criterion};\nuse rand::prelude::"
  },
  {
    "path": "rust/benches/quick_clone_bench.rs",
    "chars": 2182,
    "preview": "use bplustree::BPlusTreeMap;\nuse criterion::{black_box, criterion_group, criterion_main, Criterion};\n\nfn benchmark_key_o"
  },
  {
    "path": "rust/benches/range_scan_profiling.rs",
    "chars": 8701,
    "preview": "use bplustree::BPlusTreeMap;\nuse criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};\nuse ra"
  },
  {
    "path": "rust/docs/BENCHMARK_RESULTS.md",
    "chars": 5920,
    "preview": "# B+ Tree vs BTreeMap Performance Comparison\n\n## Executive Summary\n\nOur B+ Tree implementation shows **competitive perfo"
  },
  {
    "path": "rust/docs/CLAUDE.md",
    "chars": 3445,
    "preview": "Always follow the instructions in plan.md. When I say \"go\", find the next unmarked test in plan.md, implement the test, "
  },
  {
    "path": "rust/docs/CODE_DUPLICATION_ANALYSIS.md",
    "chars": 14413,
    "preview": "# B+ Tree Code Duplication Analysis & Missing Abstractions\n\n## Executive Summary\n\nAfter analyzing the Rust codebase, I'v"
  },
  {
    "path": "rust/docs/COPY_PASTE_DETECTOR_SUMMARY.md",
    "chars": 6699,
    "preview": "# Copy/Paste Detector Analysis: B+ Tree Rust Codebase\n\n## 🎯 Executive Summary\n\nThe copy/paste detector analysis reveals "
  },
  {
    "path": "rust/docs/FRESH_BENCHMARK_RESULTS_2025.md",
    "chars": 5940,
    "preview": "# Fresh Benchmark Results - January 2025\n\n## Test Environment\n- **Date**: January 8, 2025\n- **Hardware**: x86_64 Linux ("
  },
  {
    "path": "rust/docs/PERFORMANCE_BENCHMARKS.md",
    "chars": 5223,
    "preview": "# BPlusTreeMap Performance Benchmarks\n\nThis document contains the latest benchmark results comparing BPlusTreeMap agains"
  },
  {
    "path": "rust/docs/PROJECT_STATUS.md",
    "chars": 4377,
    "preview": "# B+ Tree Project Status\n\n## Overview\nThis document tracks the progress of the B+ Tree implementation in Rust, following"
  },
  {
    "path": "rust/docs/RANGE_OPTIMIZATION_SUMMARY.md",
    "chars": 5687,
    "preview": "# B+ Tree Range Query Optimization: Executive Summary\n\n## The Problem\n\nOur current B+ Tree implementation has a **critic"
  },
  {
    "path": "rust/docs/RANGE_QUERY_OPTIMIZATION_PLAN.md",
    "chars": 13963,
    "preview": "# B+ Tree Range Query Optimization Plan\n\n## Problem Analysis\n\n### Current Implementation Issues\nOur current range query "
  },
  {
    "path": "rust/docs/TEST_RELIABILITY_PLAN.md",
    "chars": 4388,
    "preview": "# B+ Tree Reliability Test Plan\n\n## Goal: Demonstrate Unreliability Through Adversarial Testing\n\n### Philosophy\nWe're no"
  },
  {
    "path": "rust/docs/UPDATED_COPY_PASTE_ANALYSIS.md",
    "chars": 13402,
    "preview": "# Updated Copy/Paste Detector Analysis: B+ Tree Rust Codebase\n\n## 🎯 Executive Summary\n\nAfter the latest PHASE 2 refactor"
  },
  {
    "path": "rust/docs/arena-allocation-learnings.md",
    "chars": 4369,
    "preview": "# Arena Allocation Implementation Learnings\n\n## Summary of Attempt\n\nAttempted to implement arena-based leaf allocation f"
  },
  {
    "path": "rust/docs/arena_migration_plan.md",
    "chars": 1535,
    "preview": "# Plan for Removing Non-Arena Node Variants\n\n## Current State Analysis\nThe codebase currently has four `NodeRef` variant"
  },
  {
    "path": "rust/docs/claude_refactoring.md",
    "chars": 12132,
    "preview": "# B+ Tree Refactoring Plan: Helper Functions for Code Simplification\n\nGenerated on: January 6, 2025\n\n## Executive Summar"
  },
  {
    "path": "rust/docs/code_coverage_analysis.md",
    "chars": 8449,
    "preview": "# Code Coverage Analysis Report\n\nGenerated on: June 3, 2025\n\n## Overview\n\nThis document provides a comprehensive analysi"
  },
  {
    "path": "rust/docs/codex_refactoring.md",
    "chars": 3280,
    "preview": "# Refactoring Plan: Helper APIs & Code Simplification\n\nThis document outlines a phased approach to introduce reusable he"
  },
  {
    "path": "rust/docs/concurrency_locking_strategies.md",
    "chars": 8746,
    "preview": "# Concurrency Control in B+ Trees: Global Lock vs Fine-Grained Node Locking\n\nThis document analyzes two fundamental appr"
  },
  {
    "path": "rust/docs/optimal_capacity_analysis.md",
    "chars": 4113,
    "preview": "# B+ Tree Optimal Capacity Analysis\n\n## Executive Summary\n\nAfter extensive benchmarking, we found that **capacity 64-128"
  },
  {
    "path": "rust/docs/parallel_vectors_vs_entries.md",
    "chars": 6145,
    "preview": "# Design Decision: Parallel Vectors vs Single Entry Vector in LeafNode\n\nThis document analyzes the design tradeoff betwe"
  },
  {
    "path": "rust/docs/rust_performance_history.md",
    "chars": 10763,
    "preview": "# Rust B+ Tree Performance History\n\nThis document tracks the performance evolution of the Rust B+ tree implementation co"
  },
  {
    "path": "rust/examples/comprehensive_comparison.rs",
    "chars": 13256,
    "preview": "//! Comprehensive and objective comparison between BTreeMap and BPlusTreeMap\n//! This benchmark aims to demonstrate wher"
  },
  {
    "path": "rust/examples/find_optimal_capacity.rs",
    "chars": 4499,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::collections::BTreeMap;\nuse std::time::{Duration, Instant};\n\nconst ITERATIONS: usiz"
  },
  {
    "path": "rust/examples/quick_perf.rs",
    "chars": 2679,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::collections::BTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"Quick Pe"
  },
  {
    "path": "rust/examples/range_syntax_demo.rs",
    "chars": 2835,
    "preview": "use bplustree::BPlusTreeMap;\n\nfn main() {\n    println!(\"B+ Tree Range Syntax Demo\");\n    println!(\"====================="
  },
  {
    "path": "rust/examples/readme_examples.rs",
    "chars": 3548,
    "preview": "use bplustree::BPlusTreeMap;\n\nfn main() {\n    println!(\"Running README examples...\");\n\n    // Quick Start example\n    qu"
  },
  {
    "path": "rust/focused_results/custom_analysis.rs",
    "chars": 3070,
    "preview": "use std::time::{Duration, Instant};\nuse std::collections::HashMap;\n\nfn main() {\n    println!(\"=== Custom Performance Ana"
  },
  {
    "path": "rust/profiling_results/analysis_report.md",
    "chars": 5825,
    "preview": "# BPlusTreeMap Range Scan Performance Analysis\n\n## Executive Summary\n\nBased on the profiling results, we can identify se"
  },
  {
    "path": "rust/profiling_results/timing_analysis.rs",
    "chars": 2809,
    "preview": "use std::time::{Duration, Instant};\nuse bplustree::BPlusTreeMap;\n\nfn main() {\n    println!(\"=== Custom Timing Analysis f"
  },
  {
    "path": "rust/src/bin/arena_profile.rs",
    "chars": 3484,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"=== Arena Access Performance Profile ==="
  },
  {
    "path": "rust/src/bin/bound_check_test.rs",
    "chars": 2559,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"=== Bound Checking Overhead Test ===\\n\")"
  },
  {
    "path": "rust/src/bin/delete_profiler.rs",
    "chars": 4401,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"Delete Operation Profiler\");\n    println"
  },
  {
    "path": "rust/src/bin/detailed_delete_profiler.rs",
    "chars": 4280,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"Detailed Delete Operation Profiler\");\n  "
  },
  {
    "path": "rust/src/bin/function_profiler.rs",
    "chars": 6914,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::collections::HashMap;\nuse std::time::{Duration, Instant};\n\nstruct ProfileData {\n  "
  },
  {
    "path": "rust/src/bin/instruments_delete_target.rs",
    "chars": 2177,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::{Duration, Instant};\n\n// A long-running delete-focused workload for Instrume"
  },
  {
    "path": "rust/src/bin/large_delete_benchmark.rs",
    "chars": 3036,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::collections::BTreeMap;\nuse std::time::Instant;\n\n// Large-scale delete benchmark co"
  },
  {
    "path": "rust/src/bin/micro_range_bench.rs",
    "chars": 3046,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"=== Micro Range Benchmark ===\\n\");\n\n    "
  },
  {
    "path": "rust/src/bin/profile_functions.rs",
    "chars": 9678,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"=== BPlusTree Function-Level Performance"
  },
  {
    "path": "rust/src/bin/range_comparison.rs",
    "chars": 9007,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::collections::BTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"=== BTre"
  },
  {
    "path": "rust/src/bin/range_profile.rs",
    "chars": 5371,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::time::Instant;\n\nfn main() {\n    println!(\"=== Range Operation Performance Deep Div"
  },
  {
    "path": "rust/src/compact_arena.rs",
    "chars": 15063,
    "preview": "//! Compact arena implementation using Vec<T> instead of Vec<Option<T>>\n//! This eliminates the Option wrapper overhead "
  },
  {
    "path": "rust/src/comprehensive_performance_benchmark.rs",
    "chars": 7798,
    "preview": "use crate::BPlusTreeMap;\nuse std::collections::BTreeMap;\nuse std::time::Instant;\n\n/// Comprehensive performance benchmar"
  },
  {
    "path": "rust/src/construction.rs",
    "chars": 11252,
    "preview": "//! Construction and initialization logic for BPlusTreeMap and nodes.\n//!\n//! This module contains all the construction,"
  },
  {
    "path": "rust/src/delete_operations.rs",
    "chars": 31155,
    "preview": "//! DELETE operations for BPlusTreeMap.\n//!\n//! This module contains all the deletion operations for the B+ tree, includ"
  },
  {
    "path": "rust/src/detailed_iterator_analysis.rs",
    "chars": 7338,
    "preview": "use crate::BPlusTreeMap;\nuse std::collections::BTreeMap;\nuse std::time::Instant;\n\n/// Detailed analysis of what actually"
  },
  {
    "path": "rust/src/error.rs",
    "chars": 5969,
    "preview": "//! Error handling and result types for BPlusTreeMap operations.\n//!\n//! This module provides comprehensive error handli"
  },
  {
    "path": "rust/src/get_operations.rs",
    "chars": 12433,
    "preview": "//! GET operations for BPlusTreeMap.\n//!\n//! This module contains all the read operations for the B+ tree, including\n//!"
  },
  {
    "path": "rust/src/insert_operations.rs",
    "chars": 12501,
    "preview": "//! INSERT operations for BPlusTreeMap.\n//!\n//! This module contains all the insertion operations for the B+ tree, inclu"
  },
  {
    "path": "rust/src/iteration.rs",
    "chars": 14672,
    "preview": "//! Iterator implementations for BPlusTreeMap.\n//!\n//! This module contains all iterator types and their implementations"
  },
  {
    "path": "rust/src/lib.rs",
    "chars": 7846,
    "preview": "//! B+ Tree implementation in Rust with dict-like API.\n//!\n//! This module provides a B+ tree data structure with a dict"
  },
  {
    "path": "rust/src/macros.rs",
    "chars": 9852,
    "preview": "//! Macros to eliminate repetitive patterns in B+ Tree operations and testing\n\n/// Macro to eliminate repetitive invaria"
  },
  {
    "path": "rust/src/node.rs",
    "chars": 25656,
    "preview": "//! Node implementations for BPlusTreeMap.\n//!\n//! This module contains the complete implementations for LeafNode and Br"
  },
  {
    "path": "rust/src/range_queries.rs",
    "chars": 4014,
    "preview": "//! Range query operations for BPlusTreeMap.\n//!\n//! This module contains all range-related operations including range i"
  },
  {
    "path": "rust/src/tree_structure.rs",
    "chars": 8946,
    "preview": "//! Tree structure management operations for BPlusTreeMap.\n//!\n//! This module contains all tree-level operations that m"
  },
  {
    "path": "rust/src/types.rs",
    "chars": 5910,
    "preview": "//! Core types and data structures for BPlusTreeMap.\n//!\n//! This module contains all the fundamental data structures, t"
  },
  {
    "path": "rust/src/validation.rs",
    "chars": 13958,
    "preview": "//! Validation and debugging utilities for BPlusTreeMap.\n//!\n//! This module contains all validation methods, invariant "
  },
  {
    "path": "rust/tests/adversarial_arena_corruption.rs",
    "chars": 7101,
    "preview": "use bplustree::{assert_tree_valid, verify_attack_result};\n\nmod test_utils;\nuse test_utils::*;\n\n/// These tests target th"
  },
  {
    "path": "rust/tests/adversarial_branch_rebalancing.rs",
    "chars": 8849,
    "preview": "mod test_utils;\nuse test_utils::*;\n\n/// These tests are designed to break the B+ tree implementation by targeting\n/// th"
  },
  {
    "path": "rust/tests/adversarial_edge_cases.rs",
    "chars": 9815,
    "preview": "mod test_utils;\nuse test_utils::*;\n\n/// Final adversarial tests targeting root collapse logic, capacity boundaries,\n/// "
  },
  {
    "path": "rust/tests/adversarial_linked_list.rs",
    "chars": 10848,
    "preview": "mod test_utils;\nuse std::collections::HashSet;\nuse test_utils::*;\n\n/// These tests target the linked list maintenance ac"
  },
  {
    "path": "rust/tests/bplus_tree.rs",
    "chars": 68212,
    "preview": "use bplustree::{BPlusTreeError, BPlusTreeMap, NodeRef};\nuse std::marker::PhantomData;\n\nmod test_utils;\nuse test_utils::*"
  },
  {
    "path": "rust/tests/bug_reproduction_tests.rs",
    "chars": 9896,
    "preview": "/// Test cases to reproduce specific bugs found in the B+ tree implementation\n/// Each test demonstrates a concrete fail"
  },
  {
    "path": "rust/tests/critical_bug_test.rs",
    "chars": 5767,
    "preview": "/// Test to verify linked list integrity during merge operations\n/// These tests ensure proper linked list maintenance d"
  },
  {
    "path": "rust/tests/debug_infinite_loop.rs",
    "chars": 2138,
    "preview": "/// Debug test to find the infinite loop\nuse bplustree::BPlusTreeMap;\n\nmod test_utils;\nuse test_utils::*;\n\n#[test]\nfn te"
  },
  {
    "path": "rust/tests/enhanced_error_handling.rs",
    "chars": 13604,
    "preview": "//! Enhanced error handling tests\n//! These tests verify the improved error handling patterns, Result type aliases,\n//! "
  },
  {
    "path": "rust/tests/error_handling_consistency.rs",
    "chars": 10927,
    "preview": "//! Error handling consistency tests\n//! These tests verify that the B+ tree implementation uses consistent error handli"
  },
  {
    "path": "rust/tests/fuzz_tests.rs",
    "chars": 17464,
    "preview": "//! Fuzz tests for BPlusTree\n//!\n//! These tests are marked with `#[ignore]` so they don't run during normal `cargo test"
  },
  {
    "path": "rust/tests/linked_list_corruption_detection.rs",
    "chars": 11040,
    "preview": "//! Linked list integrity verification tests\n//! These tests verify proper linked list maintenance during merge operatio"
  },
  {
    "path": "rust/tests/memory_leak_detection.rs",
    "chars": 11782,
    "preview": "//! Memory leak regression tests for B+ tree implementation\n//! These tests prevent memory leaks from being reintroduced"
  },
  {
    "path": "rust/tests/memory_safety_audit.rs",
    "chars": 7983,
    "preview": "//! Memory safety audit tests\n//! These tests verify that all type conversions are properly bounds-checked\n\nuse bplustre"
  },
  {
    "path": "rust/tests/range_bounds_syntax.rs",
    "chars": 5545,
    "preview": "use bplustree::BPlusTreeMap;\n\n#[test]\nfn test_range_syntax_inclusive() {\n    let mut tree = BPlusTreeMap::new(16).unwrap"
  },
  {
    "path": "rust/tests/range_differential.rs",
    "chars": 4236,
    "preview": "use bplustree::BPlusTreeMap;\nuse std::collections::BTreeMap;\n\nfn populate_maps(capacity: usize, data: &[i32]) -> (BPlusT"
  },
  {
    "path": "rust/tests/remove_operations.rs",
    "chars": 16591,
    "preview": "use bplustree::BPlusTreeMap;\n\nmod test_utils;\nuse test_utils::*;\n\n#[test]\nfn test_underfull_child_rebalancing_path() {\n "
  },
  {
    "path": "rust/tests/simple_bug_tests.rs",
    "chars": 7430,
    "preview": "/// Simplified tests to demonstrate specific bugs in the B+ tree implementation\nmod test_utils;\nuse test_utils::*;\n\n#[te"
  },
  {
    "path": "rust/tests/specific_bug_demos.rs",
    "chars": 10775,
    "preview": "/// Tests that specifically demonstrate the identified bugs with clear evidence\nuse bplustree::BPlusTreeMap;\n\nmod test_u"
  },
  {
    "path": "rust/tests/test_utils.rs",
    "chars": 17874,
    "preview": "#![allow(dead_code)] // Allow unused utility functions for future tests\n\n/// Comprehensive test utilities to eliminate m"
  },
  {
    "path": "rust/tools/parse_time_profile.py",
    "chars": 1619,
    "preview": "#!/usr/bin/env python3\nimport sys\nimport xml.etree.ElementTree as ET\nfrom collections import Counter\n\n\"\"\"\nBest-effort pa"
  },
  {
    "path": "rust-toolchain.toml",
    "chars": 31,
    "preview": "[toolchain]\nchannel = \"stable\"\n"
  },
  {
    "path": "scripts/analyze_benchmarks.py",
    "chars": 7649,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nSimple script to analyze and visualize B+ tree benchmark results.\n\"\"\"\n\nimport matplotlib.pypl"
  },
  {
    "path": "scripts/instruments_export.sh",
    "chars": 1503,
    "preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nTRACE_PATH=${1:-rust/delete_profile.trace}\nOUT_DIR=${2:-rust/delete_export}\n\nmkdi"
  },
  {
    "path": "scripts/precommit.sh",
    "chars": 300,
    "preview": "#!/usr/bin/env bash\nset -euo pipefail\n\necho \"[pre-commit] Formatting (cargo fmt --all)\"\ncargo fmt --all\n\necho \"[pre-comm"
  },
  {
    "path": "simple_time_analysis.py",
    "chars": 9229,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nAnalyze programming time based on commit patterns.\nSimple version without matplotlib dependen"
  }
]

// ... and 3 more files (download the full .txt file for their content)

About this extraction

This page contains the full source code of the KentBeck/BPlusTree3 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 203 files (1.5 MB), approximately 378.3k tokens, and a symbol index with 1173 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!