Full Code of ethereum/sharding for AI

master 09b8f3a73e1d cached
50 files
184.7 KB
54.3k tokens
139 symbols
1 requests
Download .txt
Repository: ethereum/sharding
Branch: master
Commit: 09b8f3a73e1d
Files: 50
Total size: 184.7 KB

Directory structure:
gitextract_jgzbq6ot/

├── .bumpversion.cfg
├── .github/
│   ├── ISSUE_TEMPLATE.md
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .travis.yml
├── MANIFEST.in
├── Makefile
├── README.md
├── docs/
│   ├── doc.html
│   └── doc.md
├── requirements-dev.txt
├── requirements.txt
├── setup.py
├── sharding/
│   ├── __init__.py
│   ├── contracts/
│   │   ├── __init__.py
│   │   ├── sharding_manager.json
│   │   ├── sharding_manager.v.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── config.py
│   │       └── smc_utils.py
│   └── handler/
│       ├── __init__.py
│       ├── exceptions.py
│       ├── log_handler.py
│       ├── shard_tracker.py
│       ├── smc_handler.py
│       └── utils/
│           ├── __init__.py
│           ├── log_parser.py
│           ├── shard_tracker_utils.py
│           ├── smc_handler_utils.py
│           └── web3_utils.py
├── tests/
│   ├── __init__.py
│   ├── conftest.py
│   ├── contract/
│   │   ├── __init__.py
│   │   ├── test_add_header.py
│   │   ├── test_compile.py
│   │   ├── test_log_emission.py
│   │   ├── test_notary_sample.py
│   │   ├── test_registry_management.py
│   │   ├── test_submit_vote.py
│   │   └── utils/
│   │       ├── common_utils.py
│   │       ├── notary_account.py
│   │       └── sample_helper.py
│   └── handler/
│       ├── __init__.py
│       ├── test_log_handler.py
│       ├── test_shard_tracker.py
│       ├── test_smc_handler.py
│       └── utils/
│           ├── __init__.py
│           └── config.py
├── tools/
│   └── vyper_compile_script.py
└── tox.ini

================================================
FILE CONTENTS
================================================

================================================
FILE: .bumpversion.cfg
================================================
[bumpversion]
current_version = 0.0.2-alpha.2
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(-(?P<stage>[^.]*)\.(?P<devnum>\d+))?
serialize = 
	{major}.{minor}.{patch}-{stage}.{devnum}
	{major}.{minor}.{patch}

[bumpversion:part:stage]
optional_value = stable
first_value = stable
values = 
	alpha
	beta
	stable

[bumpversion:part:devnum]

[bumpversion:file:setup.py]
search = version='{current_version}',
replace = version='{new_version}',



================================================
FILE: .github/ISSUE_TEMPLATE.md
================================================
* OS: osx/linux/win
* Environment (output of `pip freeze`):
    * Python version
    * Vyper version
    * py-evm version

### What is wrong?

Please include information like:

* full output of the error you received
* what command you ran
* the code that caused the failure (see [this link](https://help.github.com/articles/basic-writing-and-formatting-syntax/) for help with formatting code)


### How can it be fixed

Fill this in if you know how to fix it.


================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
### What was wrong?



### How was it fixed?



#### Cute Animal Picture

![put a cute animal picture link inside the parentheses]()


================================================
FILE: .gitignore
================================================
*.py[co]
__pycache__/
*~
[#]*[#]
.*.swp
.*.swo
.*.swn
.~
.DS_Store
/tmp/
/.venv/
/dist/
/*.egg-info/
/.tox/
/bin/
/develop-eggs/
/eggs/
.installed.cfg
logging.conf
*.log
.coverage
.eggs
.cache
.env
.idea
.venv*
.build

================================================
FILE: .travis.yml
================================================
sudo: false
language: python
dist: trusty
env:
  global:
    - PYTEST_ADDOPTS="-n 2 --durations 50 --maxfail 50"
matrix:
  include:
    #
    # Linting and Static Analysis
    #
    - python: "3.5"
      env: TOX_POSARGS="-e lint35"
    - python: "3.6"
      env: TOX_POSARGS="-e lint36"
    #
    # Python 3.5
    #
    - python: "3.5"
      env: TOX_POSARGS="-e py35-handler"
    #
    # Python 3.6
    #
    - python: "3.6"
      env: TOX_POSARGS="-e py36-contract"
    - python: "3.6"
      env: TOX_POSARGS="-e py36-handler"
cache:
  pip: true
install:
  - "travis_retry pip install pip setuptools --upgrade"
  - "travis_retry pip install tox"
before_script:
  - pip freeze
script:
  - tox $TOX_POSARGS
after_script:
  - cat .tox/$TOX_POSARGS/log/*.log


================================================
FILE: MANIFEST.in
================================================
include README.md
include requirements.txt
include requirements-dev.txt

include sharding/contracts/sharding_manager.json


================================================
FILE: Makefile
================================================
# Variables
# compile-smc parameters
compile_script = tools/vyper_compile_script.py
contract = sharding/contracts/sharding_manager.v.py
contract_json = sharding/contracts/sharding_manager.json

# File rule: regenerate the contract JSON artifact only when the Vyper
# source is newer, so repeated `make compile-smc` runs skip recompilation.
$(contract_json): $(contract)
	python $(compile_script) $<

# Commands
.PHONY: help
# Print a one-line summary of every user-facing target.
# Note: the packaging target is `sdist` (the old text advertised a
# nonexistent `dist` target).
help:
	@echo "compile-smc - compile sharding manager contract"
	@echo "clean - remove build and Python file artifacts"
	@echo "clean-build - remove build artifacts"
	@echo "clean-pyc - remove Python file artifacts"
	@echo "lint - check style with flake8 and mypy"
	@echo "test - run tests quickly with the default Python"
	@echo "test-all - run tox"
	@echo "release - package and upload a release"
	@echo "sdist - package"

# These are command names, not files: declare them phony so a stray file
# named `clean` or `compile-smc` cannot make them appear up to date.
.PHONY: compile-smc clean

# Alias: the actual work (and up-to-date check) lives in the
# $(contract_json) file rule above.
compile-smc: $(contract_json)

# Remove all build and Python bytecode artifacts.
clean: clean-build clean-pyc

.PHONY: clean-build
# Remove packaging artifacts produced by setup.py (sdist/bdist output).
clean-build:
	rm -fr build/
	rm -fr dist/
	rm -fr *.egg-info

.PHONY: clean-pyc
# Remove Python bytecode files and editor backup files anywhere in the tree.
clean-pyc:
	find . -name '*.pyc' -exec rm -f {} +
	find . -name '*.pyo' -exec rm -f {} +
	find . -name '*~' -exec rm -f {} +

.PHONY: lint
# Run both lint environments. The previous form `tox -elint3{5,6}` relied
# on bash brace expansion, which the default /bin/sh recipe shell does not
# perform; tox accepts a comma-separated env list portably.
lint:
	tox -e lint35,lint36

.PHONY: test test-all

# Run the test suite quickly with the default Python interpreter.
test:
	py.test --tb native tests

# Run the full test matrix (all environments) via tox.
test-all:
	tox

.PHONY: release
# Cut a release: bump the version (pass e.g. `make release bump=patch`),
# push the commit and tag upstream, and upload to PyPI.
#
# Bug fix: the old recipe used `$(git config ...)` and
# `$(CURRENT_SIGN_SETTING)`, which Make expands (to empty) before the shell
# ever sees them — and each line ran in a separate shell, so the saved
# value was lost anyway. Use `$$` to defer to the shell and chain the
# commands in one shell so the user's gpgSign setting is truly restored.
release: clean
	CURRENT_SIGN_SETTING=$$(git config commit.gpgSign); \
	git config commit.gpgSign true && \
	bumpversion $(bump) && \
	git push upstream && git push upstream --tags && \
	python setup.py sdist bdist_wheel upload && \
	git config commit.gpgSign "$$CURRENT_SIGN_SETTING"

.PHONY: sdist
# Build source and wheel distributions into ./dist (no upload) and list them.
sdist: clean
	python setup.py sdist bdist_wheel
	ls -l dist


================================================
FILE: README.md
================================================
# Sharding

### Sharding Implementation
Refer [Py-EVM](https://github.com/ethereum/py-evm) for the latest implementation progress.

### Specification and Documentation
See the "docs" directory for documentation and EIPs.

### Ethereum Research Forum
Please visit [ethresear.ch](https://ethresear.ch/c/sharding).


================================================
FILE: docs/doc.html
================================================
<h3>Preliminaries</h3>

<p>We assume that at address <code>VALIDATOR_MANAGER_ADDRESS</code> (on the existing "main shard") there exists a contract that manages an active "validator set", and supports the following functions:</p>

<ul>
<li><code>deposit(address validationCodeAddr, address returnAddr) returns uint256</code>: adds a validator to the validator set, with the validator's size being the <code>msg.value</code> (ie. amount of ETH deposited) in the function call. Returns the validator index. <code>validationCodeAddr</code> stores the address of the validation code; the function fails if this address's code has not been purity-verified.</li>
<li><code>withdraw(uint256 validatorIndex, bytes sig) returns bool</code>: verifies that the signature is correct (ie. a call with 200000 gas, <code>validationCodeAddr</code> as destination, 0 value and <code>sha3("withdraw") + sig</code> as data returns 1), and if it is, removes the validator from the validator set and refunds the deposited ETH.</li>
<li><code>sample(uint256 shardId) returns uint256</code>: uses a recent block hash as a seed to pseudorandomly select a signer from the validator set. Chance of being selected should be proportional to the validator's deposit.</li>
<li><code>addHeader(bytes header) returns bool</code>: attempts to process a collation header, returns True on success, reverts on failure.</li>
<li><code>get_shard_head(uint256 shardId) returns bytes32</code>: returns the header hash that is the head of a given shard as perceived by the manager contract.</li>
<li><code>getAncestor(bytes32 hash)</code>: returns the 10000th ancestor of this hash.</li>
<li><code>getAncestorDistance(bytes32 hash)</code>: returns the difference between the block number of this hash and the block number of the 10000th ancestor of this hash.</li>
<li><code>getCollationGasLimit()</code>: returns the gas limit that collations can currently have (by default make this function always answer 10 million).</li>
<li><code>txToShard(address to, uint256 shardId, bytes data) returns uint256</code>: records a request to deposit <code>msg.value</code> ETH to address <code>to</code> in shard <code>shardId</code> during a future collation. Saves a receipt ID for this request, also saving <code>msg.value</code>, <code>to</code>, <code>shardId</code>, <code>data</code> and <code>msg.sender</code>.</li>
</ul>

<h3>Parameters</h3>

<ul>
<li><code>SERENITY_FORK_BLKNUM</code>: ????</li>
<li><code>SHARD_COUNT</code>: 100</li>
<li><code>VALIDATOR_MANAGER_ADDRESS</code>: ????</li>
<li><code>USED_RECEIPT_STORE_ADDRESS</code>: ????</li>
<li><code>SIG_GASLIMIT</code>: 40000</li>
<li><code>COLLATOR_REWARD</code>: 0.002</li>
<li><code>PERIOD_LENGTH</code>: 5 blocks</li>
<li><code>SHUFFLING_CYCLE</code>: 2500 blocks</li>
</ul>

<h3>Specification</h3>

<p>We first define a "collation header" as an RLP list with the following values:</p>

<pre><code>[
    shard_id: uint256,
    expected_period_number: uint256,
    period_start_prevhash: bytes32,
    parent_collation_hash: bytes32,
    tx_list_root: bytes32,
    coinbase: address,
    post_state_root: bytes32,
    receipts_root: bytes32,
    sig: bytes
]
</code></pre>

<p>Where:</p>

<ul>
<li><code>shard_id</code> is the shard ID of the shard</li>
<li><code>expected_period_number</code> is the period number in which this collation expects to be included. A period is an interval of <code>PERIOD_LENGTH</code> blocks.</li>
<li><code>period_start_prevhash</code> is the block hash of block <code>PERIOD_LENGTH * expected_period_number - 1</code> (ie. the last block before the expected period starts). Opcodes in the shard that refer to block data (eg. NUMBER, DIFFICULTY) will refer to the data of this block, with the exception of COINBASE, which will refer to the shard coinbase.</li>
<li><code>parent_collation_hash</code> is the hash of the parent collation</li>
<li><code>tx_list_root</code> is the root hash of the trie holding the transactions included in this collation</li>
<li><code>post_state_root</code> is the new state root of the shard after this collation</li>
<li><code>receipts_root</code> is the root hash of the receipt trie</li>
<li><code>sig</code> is a signature</li>
</ul>

<p>For blocks where <code>block.number &gt;= SERENITY_FORK_BLKNUM</code>, the block header's extra data must contain a hash which points to an RLP list of <code>SHARD_COUNT</code> objects, where each object is either the empty string or a valid collation header for a shard.</p>

<p>A <strong>collation header</strong> is valid if calling <code>addHeader(header)</code> returns true. The validator manager contract should do this if:</p>

<ul>
<li>The <code>shard_id</code> is at least 0, and less than <code>SHARD_COUNT</code></li>
<li>The <code>expected_period_number</code> equals <code>floor(block.number / PERIOD_LENGTH)</code></li>
<li>A collation with the hash <code>parent_collation_hash</code> has already been accepted</li>
<li>The <code>sig</code> is a valid signature. That is, if we calculate <code>validation_code_addr = sample(shard_id)</code>, then call <code>validation_code_addr</code> with the calldata being <code>sha3(shortened_header) ++ sig</code> (where <code>shortened_header</code> is the RLP encoded form of the collation header <em>without</em> the sig), the result of the call should be 1</li>
</ul>

<p>A <strong>collation</strong> is valid if (i) its collation header is valid, (ii) executing the collation on top of the <code>parent_collation_hash</code>'s <code>post_state_root</code> results in the given <code>post_state_root</code> and <code>receipts_root</code>, and (iii) the total gas used is less than or equal to the output of calling <code>getCollationGasLimit()</code> on the main shard.</p>

<h3>Collation state transition function</h3>

<p>The state transition process for executing a collation is as follows:</p>

<ul>
<li>Execute each transaction in the tree pointed to by <code>tx_list_root</code> in order</li>
<li>Assign a reward of <code>COLLATOR_REWARD</code> to the coinbase</li>
</ul>

<h3>Receipt-consuming transactions</h3>

<p>A transaction in a shard can use a receipt ID as its signature (that is, (v, r, s) = (1, receiptID, 0)). Let <code>(to, value, shard_id, sender, data)</code> be the values that were saved by the <code>txToShard</code> call that created this receipt. For such a transaction to be valid:</p>

<ul>
<li>Such a receipt <em>must</em> have in fact been created by a <code>txToShard</code> call in the main chain.</li>
<li>The <code>to</code> and <code>value</code> of the transaction <em>must</em> match the <code>to</code> and <code>value</code> of this receipt.</li>
<li>The shard Id <em>must</em> match <code>shard_id</code>.</li>
<li>The contract at address <code>USED_RECEIPT_STORE_ADDRESS</code> <em>must NOT</em> have a record saved saying that the given receipt ID was already consumed.</li>
</ul>

<p>The transaction has an additional side effect of saving a record in <code>USED_RECEIPT_STORE_ADDRESS</code> saying that the given receipt ID has been consumed. Such a transaction effects a message whose:</p>

<ul>
<li><code>sender</code> is <code>USED_RECEIPT_STORE_ADDRESS</code></li>
<li><code>to</code> is the <code>to</code> from the receipt</li>
<li><code>value</code> is the <code>value</code> from the receipt, minus <code>gasprice * gaslimit</code></li>
<li><code>data</code> is twelve zero bytes concatenated with the <code>sender</code> from the receipt concatenated with the <code>data</code> from the receipt</li>
<li>Gas refunds go to the <code>to</code> address</li>
</ul>

<h3>Details of <code>sample</code></h3>

<p>The <code>sample</code> function should be coded in such a way that any given validator randomly gets allocated to some number of shards every <code>SHUFFLING_CYCLE</code>, where the expected number of shards is proportional to the validator's balance. During that cycle, <code>sample(shard_id)</code> can only return that validator if the <code>shard_id</code> is one of the shards that they were assigned to. The purpose of this is to give validators time to download the state of the specific shards that they are allocated to.</p>

<p>Here is one possible implementation of <code>sample</code>, assuming for simplicity of illustration that all validators have the same deposit size:</p>

<pre><code>def sample(shard_id: num) -&gt; address:
    cycle = floor(block.number / SHUFFLING_CYCLE)
    cycle_seed = blockhash(cycle * SHUFFLING_CYCLE)
    seed = blockhash(block.number - (block.number % PERIOD_LENGTH))
    index_in_subset = num256_mod(as_num256(sha3(concat(seed, as_bytes32(shard_id)))),
                                 100)
    validator_index = num256_mod(as_num256(sha3(concat(cycle_seed, as_bytes32(shard_id), as_bytes32(index_in_subset)))),
                                 as_num256(self.validator_set_size))
    return self.validators[validator_index]
</code></pre>

<p>This picks out 100 validators for each shard during each cycle, and then during each block one out of those 100 validators is picked by choosing a distinct <code>index_in_subset</code> for each block.</p>

<h3>Collation Header Production and Propagation</h3>

<p>We generally expect collation headers to be produced and propagated as follows.</p>

<ul>
<li>Every time a new <code>SHUFFLING_CYCLE</code> starts, every validator computes the set of 100 validators for every shard that they were assigned to, and sees which shards they are eligible to validate in. The validator then downloads the state for that shard (using fast sync)</li>
<li>The validator keeps track of the head of the chain for all shards they are currently assigned to. It is each validator's responsibility to reject invalid or unavailable collations, and refuse to build on such blocks, even if those blocks get accepted by the main chain validator manager contract.</li>
<li>If a validator is currently eligible to validate in some shard <code>i</code>, they download the full collation associated with any collation header that is included into block headers for shard <code>i</code>.</li>
<li>When on the current global main chain a new period starts, the validator calls <code>sample(i)</code> to determine if they are eligible to create a collation; if they are, then they do so.</li>
</ul>

<h3>Rationale</h3>

<p>This allows for a quick and dirty form of medium-security proof of stake sharding in a way that achieves quadratic scaling through separation of concerns between block proposers and collators, and thereby increases throughput by ~100x without too many changes to the protocol or software architecture. This is intended to serve as the first phase in a multi-phase plan to fully roll out quadratic sharding, the latter phases of which are described below.</p>

<h3>Subsequent phases</h3>

<ul>
<li><strong>Phase 2, option a</strong>: require collation headers to be added in as uncles instead of as transactions</li>
<li><strong>Phase 2, option b</strong>: require collation headers to be added in an array, where item <code>i</code> in the array must be either a collation header of shard <code>i</code> or the empty string, and where the extra data must be the hash of this array (soft fork)</li>
<li><strong>Phase 3 (two-way pegging)</strong>: add to the <code>USED_RECEIPT_STORE_ADDRESS</code> contract a function that allows receipts to be created in shards. Add to the main chain's <code>VALIDATOR_MANAGER_ADDRESS</code> a function for submitting Merkle proofs of unspent receipts that have confirmed (ie. they point to some hash <code>h</code> such that some hash <code>h2</code> exists such that <code>getAncestor(h2) = h</code> and <code>getAncestorDistance(h2) &lt; 10000 * PERIOD_LENGTH * 1.33</code>), which has similar behavior to the <code>USED_RECEIPT_STORE_ADDRESS</code> contract in the shards.</li>
<li><strong>Phase 4 (tight coupling)</strong>: blocks are no longer valid if they point to invalid or unavailable collations. Add data availability proofs.</li>
</ul>


================================================
FILE: docs/doc.md
================================================
## Introduction

The purpose of this document is to provide a reasonably complete specification and introduction for anyone looking to understand the details of the sharding proposal, as well as to implement it. This document as written describes only "phase 1" of quadratic sharding; [phases 2, 3 and 4](https://github.com/ethereum/sharding/blob/develop/docs/doc.md#subsequent-phases) are at this point out of scope, and super-quadratic sharding ("Ethereum 3.0") is also out of scope.

Suppose that the variable `c` denotes the level of computational power available to one node. In a simple blockchain, the transaction capacity is bounded by O(c), as every node must process every transaction. The goal of quadratic sharding is to increase the capacity with a two-layer design. Stage 1 requires no hard forks; the main chain stays exactly as is. However, a contract is published to the main chain called the **validator manager contract** (VMC), which maintains the sharding system. There are O(c) **shards** (currently, 100), where each shard is like a separate "galaxy": it has its own account space, transactions need to specify which shard they are to be published inside, and communication between shards is very limited (in fact, in phase 1, it is nonexistent).

The shards are run on a simple longest-chain-rule proof of stake system, where the stake is on the main chain (specifically, inside the VMC). All shards share a common validator pool; this also means that anyone who signs up with the VMC as a validator could theoretically at any time be assigned the right to create a block on any shard. Each shard has a block size/gas limit of O(c), and so the total capacity of the system is O(c^2).

Most users of the sharding system will run both (i) either a full (O(c) resource requirements) or light (O(log(c)) resource requirements) node on the main chain, and (ii) a "shard client" which talks to the main chain node via RPC (this client is assumed to be trusted because it's also running on the user's computer) and which can also be used as a light client for any shard, as a full client for any specific shard (the user would have to specify that they are "watching" a specific shard) or as a validator node. In all cases, the storage and computation requirements for a shard client will also not exceed O(c) (unless the user chooses to specify that they are watching _every_ shard; block explorers and large exchanges may want to do this).

In this document, the term `Collation` is used to differentiate from `Block` because (i) they are different RLP objects: transactions are level 0 objects, collations are level 1 objects that package transactions, and blocks are level 2 objects that package collation (headers); (ii) it’s clearer in context of sharding. Basically, `Collation` must consist of `CollationHeader` and `TransactionList`; `Witness` and the detailed format of `Collation` will be defined in **Stateless clients** section. `Collator` is the collation proposer sampled by `getEligibleProposer` function of **Validator Manager Contract** in the main chain; the mechanism will be introduced in the following sections.

| Main Chain                                 | Shard Chain            |
|--------------------------------------------|------------------------|
| Block                                      | Collation              |
| BlockHeader                                | CollationHeader        |
| Block Proposer (or `Miner` in PoW chain)   | Collator               |

## Quadratic sharding

### Constants

* `LOOKAHEAD_LENGTH`: 4
* `PERIOD_LENGTH`: 5
* `COLLATION_GASLIMIT`: 10,000,000 gas
* `SHARD_COUNT`: 100
* `SIG_GASLIMIT`: 40000 gas
* `COLLATOR_REWARD`: 0.001 ETH

### Validator Manager Contract (VMC)

We assume that at address `VALIDATOR_MANAGER_ADDRESS` (on the existing "main shard") there exists the VMC, which supports the following functions:

-   `deposit() returns uint256`: adds a validator to the validator set, with the validator's size being the `msg.value` (i.e., the amount of ETH deposited) in the function call. This function returns the validator index.
-   `withdraw(uint256 validator_index) returns bool`: verifies that `msg.sender == validators[validator_index].addr`; if it is, removes the validator from the validator set and refunds the deposited ETH.
-   `get_eligible_proposer(uint256 shard_id, uint256 period) returns address`: uses a block hash as a seed to pseudorandomly select a signer from the validator set. The chance of being selected should be proportional to the validator's deposit. The function should be able to return a value for the current period or any future period up to `LOOKAHEAD_LENGTH` periods ahead.
-   `add_header(uint256 shard_id, uint256 expected_period_number, bytes32 period_start_prevhash, bytes32 parent_hash, bytes32 transaction_root, address coinbase, bytes32 state_root, bytes32 receipt_root, uint256 number) returns bool`: attempts to process a collation header, returns True on success, reverts on failure.
-   `get_shard_head(uint256 shard_id) returns bytes32`: returns the header hash that is the head of a given shard as perceived by the manager contract.

There is also one log type:

-   `CollationAdded(indexed uint256 shard_id, bytes collation_header_bytes, bool is_new_head, uint256 score)`

where `collation_header_bytes` can be constructed in vyper by

```python
    collation_header_bytes = concat(
        as_bytes32(shard_id),
        as_bytes32(expected_period_number),
        period_start_prevhash,
        parent_hash,
        transaction_root,
        as_bytes32(collation_coinbase),
        state_root,
        receipt_root,
        as_bytes32(collation_number),
    )
```

Note: `coinbase` and `number` are renamed to `collation_coinbase` and `collation_number`, due to the fact that they are reserved keywords in vyper.

### Collation header

We first define a "collation header" as an RLP list with the following values:

    [
        shard_id: uint256,
        expected_period_number: uint256,
        period_start_prevhash: bytes32,
        parent_hash: bytes32,
        transaction_root: bytes32,
        coinbase: address,
        state_root: bytes32,
        receipt_root: bytes32,
        number: uint256,
    ]

Where:

-   `shard_id` is the shard ID of the shard;
-   `expected_period_number` is the period number in which this collation expects to be included; this is calculated as `period_number = floor(block.number / PERIOD_LENGTH)`;
-   `period_start_prevhash` is the block hash of block `PERIOD_LENGTH * expected_period_number - 1` (i.e., it is the hash of the last block before the expected period starts). Opcodes in the shard that refer to block data (e.g. NUMBER and DIFFICULTY) will refer to the data of this block, with the exception of COINBASE, which will refer to the shard coinbase;
-   `parent_hash` is the hash of the parent collation;
-   `transaction_root` is the root hash of the trie holding the transactions included in this collation;
-   `state_root` is the new state root of the shard after this collation;
-   `receipt_root` is the root hash of the receipt trie;
-   `number` is the collation number, which is also used as the score for the fork choice rule.

A **collation header** is valid if calling `add_header(shard_id, expected_period_number, period_start_prevhash, parent_hash, transaction_root, coinbase, state_root, receipt_root, number)` returns true. The validator manager contract should do this if:

-   the `shard_id` is at least 0, and less than `SHARD_COUNT`;
-   the `expected_period_number` equals the actual current period number (i.e., `floor(block.number / PERIOD_LENGTH)`)
-   a collation with the hash `parent_hash` for the same shard has already been accepted;
-   a collation for the same shard has not yet been submitted during the current period;
-   the address of the sender of `add_header` is equal to the address returned by `get_eligible_proposer(shard_id, expected_period_number)`.

A **collation** is valid if: (i) its collation header is valid; (ii) executing the collation on top of the `parent_hash`'s `state_root` results in the given `state_root` and `receipt_root`; and (iii) the total gas used is less than or equal to `COLLATION_GASLIMIT`.

### Collation state transition function

The state transition process for executing a collation is as follows:

* execute each transaction in the tree pointed to by `transaction_root` in order; and
* assign a reward of `COLLATOR_REWARD` to the coinbase.

### Details of `getEligibleProposer`

Here is one simple implementation in Viper:

```python
def getEligibleProposer(shardId: num, period: num) -> address:
    assert period >= LOOKAHEAD_LENGTH
    assert (period - LOOKAHEAD_LENGTH) * PERIOD_LENGTH < block.number
    assert self.num_validators > 0

    h = as_num256(
        sha3(
            concat(
                blockhash((period - LOOKAHEAD_LENGTH) * PERIOD_LENGTH),
                as_bytes32(shardId)
            )
        )
    )
    return self.validators[
        as_num128(
            num256_mod(
                h,
                as_num256(self.num_validators)
            )
        )
    ].addr
```

## Stateless clients

A validator is only given a few minutes' notice (precisely, `LOOKAHEAD_LENGTH * PERIOD_LENGTH` blocks worth of notice) when they are asked to create a block on a given shard. In Ethereum 1.0, creating a block requires having access to the entire state in order to validate transactions. Here, our goal is to avoid requiring validators to store the state of the entire system (as that would be an O(c^2) computational resource requirement). Instead, we allow validators to create collations knowing only the state root, pushing the responsibility onto transaction senders to provide "witness data" (i.e., Merkle branches), to prove the pre-state of the accounts that the transaction affects, and to provide enough information to calculate the post-state root after executing the transaction.

(Note that it's theoretically possible to implement sharding in a non-stateless paradigm; however, this requires: (i) storage rent to keep storage size bounded; and (ii) validators to be assigned to create blocks in a single shard for O(c) time. This scheme avoids the need for these sacrifices.)

### Data format

We modify the format of a transaction so that the transaction must specify an **access list** enumerating the parts of the state that it can access (we describe this more precisely later; for now consider this informally as a list of addresses). Any attempt to read or write to any state outside of a transaction's specified access list during VM execution returns an error. This prevents attacks where someone sends a transaction that spends 5 million cycles of gas on random execution, then attempts to access a random account for which the transaction sender and the collator do not have a witness, preventing the collator from including the transaction and thereby wasting the collator's time.

_Outside_ of the signed body of the transaction, but packaged along with the transaction, the transaction sender must specify a "witness", an RLP-encoded list of Merkle tree nodes that provides the portions of the state that the transaction specifies in its access list. This allows the collator to process the transaction with only the state root. When publishing the collation, the collator also sends a witness for the entire collation.

#### Transaction package format

```python
    [
        [nonce, acct, data....],    # transaction body (see below for specification)
        [node1, node2, node3....]   # witness
    ]
```

#### Collation format

```python
    [
        [shard_id, ... , sig],   # header
        [tx1, tx2 ...],          # transaction list
        [node1, node2, node3...] # witness
    ]
```

See also ethresearch thread on [The Stateless Client Concept](https://ethresear.ch/t/the-stateless-client-concept/172).

### Stateless client state transition function

In general, we can describe a traditional "stateful" client as executing a state transition function `stf(state, tx) -> state'` (or `stf(state, block) -> state'`). In a stateless client model, nodes do not store the state. The functions `apply_transaction` and `apply_block` can be rewritten as follows:

```python
apply_block(state_obj, witness, block) -> state_obj', reads, writes
```

Where `state_obj` is a tuple containing the state root and other O(1)-sized state data (gas used, receipts, bloom filter, etc); `witness` is a witness; and `block` is the rest of the block. The returned output is:

* a new `state_obj` containing the new state root and other variables;
* the set of objects from the witness that have been read (which is useful for block creation); and
* the set of new state objects that have been created to form the new state trie.

This allows the functions to be "pure", as well as only dealing with small-sized objects (as opposed to the state in existing Ethereum, which is currently [hundreds of gigabytes](https://etherscan.io/chart/chaindatasizefull)), making them convenient to use for sharding.

### Client logic

A client would have a config of the following form:

```python
{
    validator_address: "0x..." OR null,
    watching: [list of shard IDs],
    ...
}
```

If a validator address is provided, then it checks (on the main chain) if the address is an active validator. If it is, then every time a new period on the main chain starts (i.e., when `floor(block.number / PERIOD_LENGTH)` changes), then it should call `getEligibleProposer` for all shards for period `floor(block.number / PERIOD_LENGTH) + LOOKAHEAD_LENGTH`. If it returns the validator's address for some shard `i`, then it runs the algorithm `CREATE_COLLATION(i)` (see below).

For every shard `i` in the `watching` list, every time a new collation header appears in the main chain, it downloads the full collation from the shard network, and verifies it. It locally keeps track of all valid headers (where validity is defined recursively, i.e., for a header to be valid its parent must also be valid), and accepts as the main shard chain the shard chain whose head has the highest score, and where all collations from the genesis collation to the head are valid and available. Note that this implies the reorgs of the main chain *and* reorgs of the shard chain may both influence the shard head.

### Fetch candidate heads in reverse sorted order

To implement the algorithms for watching a shard, and for creating a collation, the first primitive that we need is the following algorithm for fetching candidate heads in highest-to-lowest order. First, suppose the existence of an (impure, stateful) method `getNextLog()`, which gets the most recent `CollationAdded` log in some given shard that has not yet been fetched. This would work by fetching all the logs in recent blocks backwards, starting from the head, and within each block looking in reverse order through the receipts. We define an impure method `fetch_candidate_head` as follows:

```python
unchecked_logs = []
current_checking_score = None

def fetch_candidate_head():
    global current_checking_score
    # Try to return a log that has the score that we are checking for,
    # checking in order of oldest to most recent.
    for i in range(len(unchecked_logs)-1, -1, -1):
        if unchecked_logs[i].score == current_checking_score:
            return unchecked_logs.pop(i)
    # If no further recorded but unchecked logs exist, go to the next
    # isNewHead = true log
    while 1:
        unchecked_logs.append(getNextLog())
        if unchecked_logs[-1].isNewHead is True:
            break
    o = unchecked_logs.pop()
    current_checking_score = o.score
    return o
```

To re-express in plain language, the idea is to scan backwards through `CollationAdded` logs (for the correct shard), and wait until you get to one where `isNewHead = True`. Return that log first, then return all more recent logs with a score equal to that log with `isNewHead = False`, in order of oldest to most recent. Then go to the previous log with `isNewHead = True` (this is guaranteed to have a score that is 1 lower than the previous NewHead), then go to all more recent collations after it with that score, and so forth.

The idea is that this algorithm is guaranteed to check potential head candidates in highest-to-lowest sorted order of score, with the second priority being oldest to most recent.

For example, suppose that `CollationAdded` logs have hashes and scores as follows:

    ... 10 11 12 11 13   14 15 11 12 13   14 12 13 14 15   16 17 18 19 16

Then, `isNewHead` would be assigned as:

    ... T  T  T  F  T    T  T  F  F  F    F  F  F  F  F    T  T  T  T  F

If we number the collations A1..A5, B1..B5, C1..C5 and D1..D5, the precise returning order is:

    D4 D3 D2 D1 D5 B2 C5 B1 C1 C4 A5 B5 C3 A3 B4 C2 A2 A4 B3 A1

### Watching a shard

If a client is watching a shard, it should attempt to download and verify any collations in that shard that it can (checking any given collation only if its parent has already been verified). To get the head at any time, keep calling `fetch_candidate_head()` until it returns a collation that has been verified; that collation is the head. This will in normal circumstances return a valid collation immediately or at most after a few tries due to latency or a small-scale attack that creates a few invalid or unavailable collations. Only in the case of a true long-running 51% attack will this algorithm degrade to O(N) time.

### CREATE_COLLATION

This process has three parts. The first part can be called `GUESS_HEAD(shard_id)`, with pseudocode here:

```python
# Download a single collation and check if it is valid or invalid (memoized)
validity_cache = {}
def memoized_fetch_and_verify_collation(c):
    if c.hash not in validity_cache:
        validity_cache[c.hash] = fetch_and_verify_collation(c)
    return validity_cache[c.hash]


def main(shard_id):
    head = None
    while 1:
        head = fetch_candidate_head(shard_id)
        c = head
        while 1:
            if not memoized_fetch_and_verify_collation(c):
                break
            c = get_parent(c)
```

`fetch_and_verify_collation(c)` involves fetching the full data of `c` (including witnesses) from the shard network, and verifying it. The above algorithm is equivalent to "pick the longest valid chain, check validity as far as possible, and if you find it's invalid then switch to the next-highest-scoring valid chain you know about". The algorithm should only stop when the validator runs out of time and it is time to create the collation. Every execution of `fetch_and_verify_collation` should also return a "write set" (see stateless client section above). Save all of these write sets, and combine them together; this is the `recent_trie_nodes_db`.

We can now define `UPDATE_WITNESS(tx, recent_trie_nodes_db)`. While running `GUESS_HEAD`, a node will have received some transactions. When it comes time to (attempt to) include a transaction into a collation, this algorithm will need to be run on the transaction first. Suppose that the transaction has an access list `[A1 ... An]`, and a witness `W`. For each `Ai`, use the current state tree root and get the Merkle branch for `Ai`, using the union of `recent_trie_nodes_db` and `W` as a database. If the original `W` was correct, and the transaction was sent not before the time that the client checked back to, then getting this Merkle branch will always succeed. After including the transaction into a collation, the "write set" from the state change should then also be added into the `recent_trie_nodes_db`.

Next, we have `CREATE_COLLATION`. For illustration, here is full pseudocode for a possible transaction-gathering part of this method.

```python
# Sort by descending order of gasprice
txpool = sorted(copy(available_transactions), key=lambda tx: -tx.gasprice)
collation = new Collation(...)
while len(txpool) > 0:
    # Remove txs that ask for too much gas
    i = 0
    while i < len(txpool):
        if txpool[i].startgas > GASLIMIT - collation.gasused:
            txpool.pop(i)
        else:
            i += 1
    tx = copy.deepcopy(txpool[0])
    tx.witness = UPDATE_WITNESS(tx.witness, recent_trie_nodes_db)
    # Try to add the transaction, discard if it fails
    success, reads, writes = add_transaction(collation, tx)
    recent_trie_nodes_db = union(recent_trie_nodes_db, writes)
    txpool.pop(0)
```

At the end, there is an additional step, finalizing the collation (to give the collator the reward, which is `COLLATOR_REWARD` ETH). This requires asking the network for a Merkle branch for the collator's account. When the network replies with this, the post-state root after applying the reward, as well as the fees, can be calculated. The collator can then package up the collation, of the form (header, txs, witness), where the witness is the union of the witnesses of all the transactions and the branch for the collator's account.

## Protocol changes

### Transaction format

The format of a transaction now becomes (note that this includes [account abstraction](https://ethresear.ch/t/tradeoffs-in-account-abstraction-proposals/263/20) and [read/write lists](https://ethresear.ch/t/account-read-write-lists/285/3)):

```
    [
        chain_id,      # 1 on mainnet
        shard_id,      # the shard the transaction goes onto
        target,        # account the tx goes to
        data,          # transaction data
        start_gas,     # starting gas
        gasprice,      # gasprice
        access_list,   # access list (see below for specification)
        code           # initcode of the target (for account creation)
    ]
```

The process for applying a transaction is now as follows:

* Verify that the `chain_id` and `shard_id` are correct
* Subtract `start_gas * gasprice` wei from the `target` account
* Check if the `target` account has code. If not, verify that `sha3(code)[12:] == target`
* If the target account is empty, execute a contract creation at the `target` with `code` as init code; otherwise skip this step
* Execute a message with the remaining gas as startgas, the `target` as the to address, 0xff...ff as the sender, 0 value, and the transaction `data` as data
* If either of the two executions fails, and <= 200000 gas has been consumed (i.e., `start_gas - remaining_gas <= 200000`), the transaction is invalid
* Otherwise `remaining_gas * gasprice` is refunded, and the fee paid is added to a fee counter (note: fees are NOT immediately added to the coinbase balance; instead, fees are added all at once during block finalization)

### Two-layer trie redesign

The existing account model is replaced with one where there is a single-layer trie, and all account balances, code and storage are incorporated into the trie. Specifically, the mapping is:

* Balance of account X: `sha3(X) ++ 0x00`
* Code of account X: `sha3(X) ++ 0x01`
* Storage key K of account X: `sha3(X) ++ 0x02 ++ K`

See also ethresearch thread on [A two-layer account trie inside a single-layer trie](https://ethresear.ch/t/a-two-layer-account-trie-inside-a-single-layer-trie/210)

Additionally, the trie is now a new binary trie design: https://github.com/ethereum/research/tree/master/trie_research

### Access list

The access list for an account looks as follows:

    [[address, prefix1, prefix2...], [address, prefix1, prefix2...], ...]

This basically means "the transaction can access the balance and code for the given accounts, as well as any storage key provided that at least one of the prefixes listed with the account is a prefix of the storage key". One can translate it into "prefix list form", which essentially is a list of prefixes of the underlying storage trie (see above section):

```python
def to_prefix_list_form(access_list):
    o = []
    for obj in access_list:
        addr, storage_prefixes = obj[0], obj[1:]
        o.append(sha3(addr) + b'\x00')
        o.append(sha3(addr) + b'\x01')
        for prefix in storage_prefixes:
            o.append(sha3(addr) + b'\x02' + prefix)
    return o
```

One can compute the witness for a transaction by taking the transaction's access list, converting it into prefix list form, then running the algorithm `get_witness_for_prefix` for each item in the prefix list form, and taking the union of these results.

`get_witness_for_prefix` returns a minimal set of trie nodes that are sufficient to access any key which starts with the given prefix. See implementation here: https://github.com/ethereum/research/blob/b0de8d352f6236c9fa2244fed871546fabb016d1/trie_research/new_bintrie.py#L250

In the EVM, any attempt to access (either by calling or SLOAD'ing or via an opcode such as `BALANCE` or `EXTCODECOPY`) an account that is outside the access list will lead to the EVM instance that made the access attempt immediately throwing an exception.

See also ethresearch thread on [Account read/write lists](https://ethresear.ch/t/account-read-write-lists/285).

### Gas costs

To be finalized.

## Subsequent phases

This allows for a quick and dirty form of medium-security proof of stake sharding in a way that achieves quadratic scaling through separation of concerns between block proposers and collators, and thereby increases throughput by ~100x without too many changes to the protocol or software architecture. This is intended to serve as the first phase in a multi-phase plan to fully roll out quadratic sharding, the latter phases of which are described below.

* **Phase 2 (two-way pegging)**: see section on `USED_RECEIPT_STORE`, still to be written
* **Phase 3, option a**: require collation headers to be added in as uncles instead of as transactions
* **Phase 3, option b**: require collation headers to be added in an array, where item `i` in the array must be either a collation header of shard `i` or the empty string, and where the extra data must be the hash of this array (soft fork)
* **Phase 4 (tight coupling)**: blocks are no longer valid if they point to invalid or unavailable collations. Add data availability proofs.


================================================
FILE: requirements-dev.txt
================================================
bumpversion>=0.5.3,<1
flake8==3.5.0
mypy==0.600
hypothesis==3.44.26
pytest==3.6.0
pytest-asyncio==0.8.0
pytest-cov==2.5.1
pytest-logging>=0.3.0
pytest-xdist==1.22.2
tox==3.0.0
eth-tester[py-evm]==0.1.0-beta.26
git+https://github.com/ethereum/vyper.git@08ba8ed7c3c84d44edda85ff28c96bd1e2d867fe


================================================
FILE: requirements.txt
================================================
cytoolz>=0.9.0,<1.0.0
eth-utils>=1.0.3,<2.0.0
rlp>=1.0.0,<2.0.0
web3>=4.1.0,<5.0.0
py-evm==0.2.0a18
eth-typing==1.0.0


================================================
FILE: setup.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages


# Read the runtime dependencies from requirements.txt so the pins live in
# a single place. Blank lines and `#` comment lines are filtered out so
# they are not handed to setuptools as (invalid) requirement specifiers.
# (The previous `INSTALL_REQUIRES = list()` pre-assignment was dead code:
# the value was unconditionally overwritten by the file read below.)
with open('requirements.txt') as f:
    INSTALL_REQUIRES = [
        line.strip()
        for line in f.read().splitlines()
        if line.strip() and not line.strip().startswith('#')
    ]

setup(
    name='sharding',
    # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
    version='0.0.2-alpha.2',
    description='Ethereum Sharding Manager Contract',
    url='https://github.com/ethereum/sharding',
    packages=find_packages(
        exclude=[
            "tests",
            "tests.*",
            "tools",
            "docs",
        ]
    ),
    python_requires='>=3.5, <4',
    py_modules=['sharding'],
    # setuptools-markdown renders README.md as the PyPI long description.
    setup_requires=['setuptools-markdown'],
    long_description_markdown_filename='README.md',
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=INSTALL_REQUIRES,
)


================================================
FILE: sharding/__init__.py
================================================
import pkg_resources

# Re-export the package's public API at the top level so consumers can
# write e.g. ``from sharding import SMC`` instead of reaching into
# submodules. The ``noqa: F401`` markers silence flake8's unused-import
# warning, since these imports exist purely for re-export.
from sharding.contracts.utils.smc_utils import (  # noqa: F401
    get_smc_source_code,
    get_smc_json,
)

from sharding.handler.log_handler import (  # noqa: F401
    LogHandler,
)
from sharding.handler.shard_tracker import (  # noqa: F401
    ShardTracker,
)
from sharding.handler.smc_handler import (  # noqa: F401
    SMC,
)


# Single source of truth for the version string: read it back from the
# installed package metadata (set via setup.py and managed by bumpversion).
__version__ = pkg_resources.get_distribution("sharding").version


================================================
FILE: sharding/contracts/__init__.py
================================================


================================================
FILE: sharding/contracts/sharding_manager.json
================================================
{"abi": [{"name": "RegisterNotary", "inputs": [{"type": "int128", "name": "index_in_notary_pool", "indexed": false}, {"type": "address", "name": "notary", "indexed": true}], "anonymous": false, "type": "event"}, {"name": "DeregisterNotary", "inputs": [{"type": "int128", "name": "index_in_notary_pool", "indexed": false}, {"type": "address", "name": "notary", "indexed": true}, {"type": "int128", "name": "deregistered_period", "indexed": false}], "anonymous": false, "type": "event"}, {"name": "ReleaseNotary", "inputs": [{"type": "int128", "name": "index_in_notary_pool", "indexed": false}, {"type": "address", "name": "notary", "indexed": true}], "anonymous": false, "type": "event"}, {"name": "AddHeader", "inputs": [{"type": "int128", "name": "period", "indexed": false}, {"type": "int128", "name": "shard_id", "indexed": true}, {"type": "bytes32", "name": "chunk_root", "indexed": false}], "anonymous": false, "type": "event"}, {"name": "SubmitVote", "inputs": [{"type": "int128", "name": "period", "indexed": false}, {"type": "int128", "name": "shard_id", "indexed": true}, {"type": "bytes32", "name": "chunk_root", "indexed": false}, {"type": "address", "name": "notary", "indexed": false}], "anonymous": false, "type": "event"}, {"name": "__init__", "outputs": [], "inputs": [{"type": "int128", "name": "_SHARD_COUNT"}, {"type": "int128", "name": "_PERIOD_LENGTH"}, {"type": "int128", "name": "_LOOKAHEAD_LENGTH"}, {"type": "int128", "name": "_COMMITTEE_SIZE"}, {"type": "int128", "name": "_QUORUM_SIZE"}, {"type": "int128", "name": "_NOTARY_DEPOSIT"}, {"type": "int128", "name": "_NOTARY_LOCKUP_LENGTH"}], "constant": false, "payable": false, "type": "constructor"}, {"name": "get_notary_info", "outputs": [{"type": "int128", "name": "out"}, {"type": "int128", "name": "out"}], "inputs": [{"type": "address", "name": "notary_address"}], "constant": true, "payable": false, "type": "function", "gas": 1288}, {"name": "update_notary_sample_size", "outputs": [{"type": "bool", "name": 
"out"}], "inputs": [], "constant": false, "payable": false, "type": "function", "gas": 71573}, {"name": "register_notary", "outputs": [{"type": "bool", "name": "out"}], "inputs": [], "constant": false, "payable": true, "type": "function", "gas": 347585}, {"name": "deregister_notary", "outputs": [{"type": "bool", "name": "out"}], "inputs": [], "constant": false, "payable": false, "type": "function", "gas": 239744}, {"name": "release_notary", "outputs": [{"type": "bool", "name": "out"}], "inputs": [], "constant": false, "payable": false, "type": "function", "gas": 120521}, {"name": "get_member_of_committee", "outputs": [{"type": "address", "name": "out"}], "inputs": [{"type": "int128", "name": "shard_id"}, {"type": "int128", "name": "index"}], "constant": true, "payable": false, "type": "function", "gas": 3704}, {"name": "add_header", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "int128", "name": "shard_id"}, {"type": "int128", "name": "period"}, {"type": "bytes32", "name": "chunk_root"}], "constant": false, "payable": false, "type": "function", "gas": 222697}, {"name": "get_vote_count", "outputs": [{"type": "int128", "name": "out"}], "inputs": [{"type": "int128", "name": "shard_id"}], "constant": true, "payable": false, "type": "function", "gas": 1229}, {"name": "has_notary_voted", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "int128", "name": "shard_id"}, {"type": "int128", "name": "index"}], "constant": true, "payable": false, "type": "function", "gas": 1321}, {"name": "submit_vote", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "int128", "name": "shard_id"}, {"type": "int128", "name": "period"}, {"type": "bytes32", "name": "chunk_root"}, {"type": "int128", "name": "index"}], "constant": false, "payable": false, "type": "function", "gas": 128234}, {"name": "notary_pool", "outputs": [{"type": "address", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}], "constant": true, "payable": false, 
"type": "function", "gas": 1126}, {"name": "notary_pool_len", "outputs": [{"type": "int128", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 933}, {"name": "empty_slots_stack", "outputs": [{"type": "int128", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}], "constant": true, "payable": false, "type": "function", "gas": 1186}, {"name": "empty_slots_stack_top", "outputs": [{"type": "int128", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 993}, {"name": "does_notary_exist", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "address", "name": "arg0"}], "constant": true, "payable": false, "type": "function", "gas": 1195}, {"name": "current_period_notary_sample_size", "outputs": [{"type": "int128", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 1053}, {"name": "next_period_notary_sample_size", "outputs": [{"type": "int128", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 1083}, {"name": "notary_sample_size_updated_period", "outputs": [{"type": "int128", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 1113}, {"name": "collation_records__chunk_root", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}, {"type": "int128", "name": "arg1"}], "constant": true, "payable": false, "type": "function", "gas": 1649}, {"name": "collation_records__proposer", "outputs": [{"type": "address", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}, {"type": "int128", "name": "arg1"}], "constant": true, "payable": false, "type": "function", "gas": 1685}, {"name": "collation_records__is_elected", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}, {"type": "int128", "name": "arg1"}], "constant": true, "payable": false, "type": "function", 
"gas": 1715}, {"name": "records_updated_period", "outputs": [{"type": "int128", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}], "constant": true, "payable": false, "type": "function", "gas": 1456}, {"name": "head_collation_period", "outputs": [{"type": "int128", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}], "constant": true, "payable": false, "type": "function", "gas": 1486}, {"name": "current_vote", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [{"type": "int128", "name": "arg0"}], "constant": true, "payable": false, "type": "function", "gas": 1516}], "bytecode": "0x600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a05260e0611c526101403934156100a757600080fd5b6060516020611c5260c03960c051806040519013156100c557600080fd5b80919012156100d357600080fd5b5060605160206020611c520160c03960c051806040519013156100f557600080fd5b809190121561010357600080fd5b5060605160206040611c520160c03960c0518060405190131561012557600080fd5b809190121561013357600080fd5b5060605160206060611c520160c03960c0518060405190131561015557600080fd5b809190121561016357600080fd5b5060605160206080611c520160c03960c0518060405190131561018557600080fd5b809190121561019357600080fd5b50606051602060a0611c520160c03960c051806040519013156101b557600080fd5b80919012156101c357600080fd5b50606051602060c0611c520160c03960c051806040519013156101e557600080fd5b80919012156101f357600080fd5b5061014051600d5561016051600e5561018051600f556101a0516010556101c0516011556101e05160125561020051601355611c3a56600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e0000000000000
00000000000000000000060a052634343d1b860005114156100c85734156100ac57600080fd5b3033146100b857600080fd5b60006003541460005260206000f3005b6384eb85c3600051141561015c57602060046101403734156100e957600080fd5b3033146100f557600080fd5b6060516004358060405190131561010b57600080fd5b809190121561011957600080fd5b5061014051600260c05260035460e052604060c02055600360605160018254018060405190131561014957600080fd5b809190121561015757600080fd5b815550005b6314de97d2600051141561021b57341561017557600080fd5b30331461018157600080fd5b60206101a06004634343d1b86101405261015c6000305af16101a257600080fd5b6101a051156101d5577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60005260206000f35b60036060516001825403806040519013156101ef57600080fd5b80919012156101fd57600080fd5b815550600260c05260035460e052604060c0205460005260206000f3005b631b16485060005114156102a3576020600461014037341561023c57600080fd5b600435602051811061024d57600080fd5b506040610160526101806001600460c0526101405160e052604060c02060c052602060c020015481526002600460c0526101405160e052604060c02060c052602060c020015481602001525061016051610180f3005b636ea86a1f60005114156103ba5734156102bc57600080fd5b600060a051600e54806102ce57600080fd5b6402540be400430205806080519013156102e757600080fd5b80919012156102f557600080fd5b1215610345576402540be4006402540be3ff60a051600e548061031757600080fd5b6402540be4004302058060805190131561033057600080fd5b809190121561033e57600080fd5b0305610384565b6402540be40060a051600e548061035b57600080fd5b6402540be4004302058060805190131561037457600080fd5b809190121561038257600080fd5b055b61014052610140516008541215156103a157600060005260206000f35b60075460065561014051600855600160005260206000f3005b639d34be88600051141561055b576012543412156103d757600080fd5b600560c0523360e052604060c02054156103f057600080fd5b600060006004636ea86a1f6101405261015c6000305af161041057600080fd5b6001546101a05260206102206004634343d1b86101c0526101dc6000305af161043857600080fd5b61022051151561046c5760206102a060046314de97d26102405261025c6000305af161046357600080fd5b6102a0516101a0525b336
00060c0526101a05160e052604060c02055600160605160018254018060405190131561049957600080fd5b80919012156104a757600080fd5b8155506007546101a0511215156104e45760605160016101a05101806040519013156104d257600080fd5b80919012156104e057600080fd5b6007555b600460c0523360e052604060c02060c052602060c020348155600060018201556101a0516002820155506001600560c0523360e052604060c020556101a0516102c052337f42cc700f5b78a74c6520ec5341d7c49eeaa8f89015e714b4d7207c947c2d19ec60206102c0a2600160005260206000f3005b63664f158e600051141561077057341561057457600080fd5b6001600560c0523360e052604060c020541461058f57600080fd5b600060006004636ea86a1f6101405261015c6000305af16105af57600080fd5b6002600460c0523360e052604060c02060c052602060c02001546101a0526000600060246384eb85c36101c0526101a0516101e0526101dc6000305af16105f557600080fd5b6000600060c0526101a05160e052604060c02055600160605160018254038060405190131561062357600080fd5b809190121561063157600080fd5b815550600060a051600e548061064657600080fd5b6402540be4004302058060805190131561065f57600080fd5b809190121561066d57600080fd5b12156106bd576402540be4006402540be3ff60a051600e548061068f57600080fd5b6402540be400430205806080519013156106a857600080fd5b80919012156106b657600080fd5b03056106fc565b6402540be40060a051600e54806106d357600080fd5b6402540be400430205806080519013156106ec57600080fd5b80919012156106fa57600080fd5b055b6001600460c0523360e052604060c02060c052602060c02001556101a051610240526001600460c0523360e052604060c02060c052602060c020015461026052337fa528ff03c83165bca6de116822fb727543effc08e4e22a2447925ffe5e1364626040610240a2600160005260206000f3005b6358821dd760005114156109a457341561078957600080fd5b6001600560c0523360e052604060c02054146107a457600080fd5b60006001600460c0523360e052604060c02060c052602060c020015414156107cb57600080fd5b6060516013546001600460c0523360e052604060c02060c052602060c020015401806040519013156107fc57600080fd5b809190121561080a57600080fd5b600060a051600e548061081c57600080fd5b6402540be4004302058060805190131561083557600080fd5b809190121561084357600080fd5b1215610893576402540be4006402540be3ff6
0a051600e548061086557600080fd5b6402540be4004302058060805190131561087e57600080fd5b809190121561088c57600080fd5b03056108d2565b6402540be40060a051600e54806108a957600080fd5b6402540be400430205806080519013156108c257600080fd5b80919012156108d057600080fd5b055b136108dc57600080fd5b6002600460c0523360e052604060c02060c052602060c020015461014052600460c0523360e052604060c02060c052602060c0205461016052600460c0523360e052604060c02060c052602060c020600081556000600182015560006002820155506000600560c0523360e052604060c02055600060006000600061016051336000f161096857600080fd5b6101405161018052337f2443ae687d261a634cadc8eba71424fe46a8663d8c30011d2bebca3a4c999c906020610180a2600160005260206000f3005b6315de738d6000511415610c5057604060046101403734156109c557600080fd5b606051600435806040519013156109db57600080fd5b80919012156109e957600080fd5b5060605160243580604051901315610a0057600080fd5b8091901215610a0e57600080fd5b50600d546101405112600061014051121516610a2957600080fd5b600060a051600e5480610a3b57600080fd5b6402540be40043020580608051901315610a5457600080fd5b8091901215610a6257600080fd5b1215610ab2576402540be4006402540be3ff60a051600e5480610a8457600080fd5b6402540be40043020580608051901315610a9d57600080fd5b8091901215610aab57600080fd5b0305610af1565b6402540be40060a051600e5480610ac857600080fd5b6402540be40043020580608051901315610ae157600080fd5b8091901215610aef57600080fd5b055b61018052610180516008541215610b0e576007546101a052610b24565b610180516008541415610b23576006546101a0525b5b6060516001606051600e54610180510280604051901315610b4457600080fd5b8091901215610b5257600080fd5b0380604051901315610b6357600080fd5b8091901215610b7157600080fd5b6101c0526060516101a0516000811215610b8a57600080fd5b610b9357600080fd5b6101a0516000811215610ba557600080fd5b60006101c0516101004303811215610bbc57600080fd5b438110610bc857600080fd5b406020826102000101526020810190506101405160208261020001015260208101905061016051602082610200010152602081019050806102005261020090508051602082012090500680604051901315610c2257600080fd5b8091901215610c3057600080fd5b6101e052600060c0526101e0516
0e052604060c0205460005260206000f3005b63bbeff1ce6000511415610ea05760606004610140373415610c7157600080fd5b60605160043580604051901315610c8757600080fd5b8091901215610c9557600080fd5b5060605160243580604051901315610cac57600080fd5b8091901215610cba57600080fd5b50600d546101405112600061014051121516610cd557600080fd5b600060a051600e5480610ce757600080fd5b6402540be40043020580608051901315610d0057600080fd5b8091901215610d0e57600080fd5b1215610d5e576402540be4006402540be3ff60a051600e5480610d3057600080fd5b6402540be40043020580608051901315610d4957600080fd5b8091901215610d5757600080fd5b0305610d9d565b6402540be40060a051600e5480610d7457600080fd5b6402540be40043020580608051901315610d8d57600080fd5b8091901215610d9b57600080fd5b055b6101a052610160516101a05114610db357600080fd5b61016051600a60c0526101405160e052604060c0205412610dd357600080fd5b600060006004636ea86a1f6101c0526101dc6000305af1610df357600080fd5b600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c02061018051815560006001820155336002820155506101a051600a60c0526101405160e052604060c020556000600c60c0526101405160e052604060c0205561016051610220526101805161024052610140517f24a51436697045b93a79a2bda900b05055f1e1e91b021b4c2fb6f67cbb0b2e956040610220a2600160005260206000f3005b63f86fda376000511415610f3b5760206004610140373415610ec157600080fd5b60605160043580604051901315610ed757600080fd5b8091901215610ee557600080fd5b50600c60c0526101405160e052604060c0205461016052606051610100610f0b57600080fd5b610100610160510680604051901315610f2357600080fd5b8091901215610f3157600080fd5b60005260206000f3005b638f8cf17560005114156110195760406004610140373415610f5c57600080fd5b60605160043580604051901315610f7257600080fd5b8091901215610f8057600080fd5b5060605160243580604051901315610f9757600080fd5b8091901215610fa557600080fd5b50600c60c0526101405160e052604060c020546101805260016060516101605160ff0380604051901315610fd857600080fd5b8091901215610fe657600080fd5b6000811215610ffd578060000360020a8204611004565b8060020a82025b905090506101805116151560005260206000f3005b63e91737d1600051141561118
4576040600461014037341561103a57600080fd5b30331461104657600080fd5b6060516004358060405190131561105c57600080fd5b809190121561106a57600080fd5b506060516024358060405190131561108157600080fd5b809190121561108f57600080fd5b50600c60c0526101405160e052604060c020546101805260016060516101605160ff03806040519013156110c257600080fd5b80919012156110d057600080fd5b60008112156110e7578060000360020a82046110ee565b8060020a82025b905090506101a0526020610260602463f86fda376101e05261014051610200526101fc6000305af161111f57600080fd5b610260516101c0526101a05161018051176101805260ff6101c0511215611162576101805160016101805101101561115657600080fd5b60016101805101610180525b61018051600c60c0526101405160e052604060c02055600160005260206000f3005b6371742fa5600051141561153857608060046101403734156111a557600080fd5b606051600435806040519013156111bb57600080fd5b80919012156111c957600080fd5b50606051602435806040519013156111e057600080fd5b80919012156111ee57600080fd5b506060516064358060405190131561120557600080fd5b809190121561121357600080fd5b50600d54610140511260006101405112151661122e57600080fd5b600060a051600e548061124057600080fd5b6402540be4004302058060805190131561125957600080fd5b809190121561126757600080fd5b12156112b7576402540be4006402540be3ff60a051600e548061128957600080fd5b6402540be400430205806080519013156112a257600080fd5b80919012156112b057600080fd5b03056112f6565b6402540be40060a051600e54806112cd57600080fd5b6402540be400430205806080519013156112e657600080fd5b80919012156112f457600080fd5b055b6101c052610160516101c0511461130c57600080fd5b6010546101a0511260006101a05112151661132657600080fd5b33602061028060446315de738d6101e05261014051610200526101a051610220526101fc6000305af161135857600080fd5b610280511461136657600080fd5b61016051600a60c0526101405160e052604060c020541461138657600080fd5b61018051600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c02054146113bd57600080fd5b60206103406044638f8cf1756102a052610140516102c0526101a0516102e0526102bc6000305af16113ee57600080fd5b61034051156113fc57600080fd5b6020610400604463e91737d161036052610
14051610380526101a0516103a05261037c6000305af161142d57600080fd5b6104005161143a57600080fd5b60206104c0602463f86fda3761044052610140516104605261045c6000305af161146357600080fd5b6104c051610420526001600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c02001541560115461042051121516156114ec5761016051600b60c0526101405160e052604060c0205560016001600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c02001555b610160516104e05261018051610500523361052052610140517f30070ae8079c39b04eac5372c1e108238a47bc888ecab315a9f53171d5e4c30160606104e0a2600160005260206000f3005b632901077a600051141561159a576020600461014037341561155957600080fd5b6060516004358060405190131561156f57600080fd5b809190121561157d57600080fd5b50600060c0526101405160e052604060c0205460005260206000f3005b63cdd8d52c60005114156115c05734156115b357600080fd5b60015460005260206000f3005b634b443aa4600051141561162257602060046101403734156115e157600080fd5b606051600435806040519013156115f757600080fd5b809190121561160557600080fd5b50600260c0526101405160e052604060c0205460005260206000f3005b631824181c600051141561164857341561163b57600080fd5b60035460005260206000f3005b6377ff3abe6000511415611697576020600461014037341561166957600080fd5b600435602051811061167a57600080fd5b50600560c0526101405160e052604060c0205460005260206000f3005b63c069707d60005114156116bd5734156116b057600080fd5b60065460005260206000f3005b63ab580c6a60005114156116e35734156116d657600080fd5b60075460005260206000f3005b6394250c0560005114156117095734156116fc57600080fd5b60085460005260206000f3005b63aa59419360005114156117a7576040600461014037341561172a57600080fd5b6060516004358060405190131561174057600080fd5b809190121561174e57600080fd5b506060516024358060405190131561176557600080fd5b809190121561177357600080fd5b50600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c0205460005260206000f3005b63f3c687dd600051141561184857604060046101403734156117c857600080fd5b606051600435806040519013156117de57600080fd5b80919012156117ec57600080fd5b5060605160243580604
05190131561180357600080fd5b809190121561181157600080fd5b506002600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c020015460005260206000f3005b63fe21918f60005114156118e9576040600461014037341561186957600080fd5b6060516004358060405190131561187f57600080fd5b809190121561188d57600080fd5b50606051602435806040519013156118a457600080fd5b80919012156118b257600080fd5b506001600960c0526101405160e052604060c02060c0526101605160e052604060c02060c052602060c020015460005260206000f3005b63e3ad6147600051141561194b576020600461014037341561190a57600080fd5b6060516004358060405190131561192057600080fd5b809190121561192e57600080fd5b50600a60c0526101405160e052604060c0205460005260206000f3005b6347ecf00d60005114156119ad576020600461014037341561196c57600080fd5b6060516004358060405190131561198257600080fd5b809190121561199057600080fd5b50600b60c0526101405160e052604060c0205460005260206000f3005b634830ad8f6000511415611a0f57602060046101403734156119ce57600080fd5b606051600435806040519013156119e457600080fd5b80919012156119f257600080fd5b50600c60c0526101405160e052604060c0205460005260206000f3005b5b61022a611c3a0361022a60003961022a611c3a036000f3"}

================================================
FILE: sharding/contracts/sharding_manager.v.py
================================================
# NOTE: Some variables are set as public variables for testing. They should be reset
# to private variables in an official deployment of the contract.

#
# Events
#

RegisterNotary: event({index_in_notary_pool: int128, notary: indexed(address)})
DeregisterNotary: event({index_in_notary_pool: int128, notary: indexed(address), deregistered_period: int128})
ReleaseNotary: event({index_in_notary_pool: int128, notary: indexed(address)})
AddHeader: event({period: int128, shard_id: indexed(int128), chunk_root: bytes32})
SubmitVote: event({period: int128, shard_id: indexed(int128), chunk_root: bytes32, notary: address})


#
# State Variables
#

# Notary pool
# - notary_pool: array of active notary addresses
# - notary_pool_len: size of the notary pool
# - empty_slots_stack: stack of empty notary slot indices
# - empty_slots_stack_top: top index of the stack
notary_pool: public(address[int128])
notary_pool_len: public(int128)
empty_slots_stack: public(int128[int128])
empty_slots_stack_top: public(int128)

# Notary registry
# - deregistered: the period in which the notary deregistered. It defaults to 0 for
#   notaries that have not yet deregistered.
# - pool_index: the notary's index in the notary pool
# - deposit: the notary's deposit value
notary_registry: {
    deregistered: int128,
    pool_index: int128,
    deposit: wei_value
}[address]
# - does_notary_exist: True if the notary's record exists in the notary registry
does_notary_exist: public(bool[address])

# Notary sampling info
# In order to keep the sample size unchanged throughout an entire period, we keep track
# of the pool size change resulting from notary registration/deregistration in the
# current period and only apply the change in the next period.
# - current_period_notary_sample_size: sample size used when sampling committee members
#   in the current period (see get_member_of_committee)
# - next_period_notary_sample_size: sample size queued to take effect in the next period
# - notary_sample_size_updated_period: latest period when current_period_notary_sample_size was updated
current_period_notary_sample_size: public(int128)
next_period_notary_sample_size: public(int128)
notary_sample_size_updated_period: public(int128)

# Collation
# - collation_records: the collation records that have been appended by the proposer.
#   Mapping [shard_id][period] to chunk_root and proposer. is_elected is used to indicate
#   if this collation has received enough votes.
# - records_updated_period: the latest period in which a new collation header has been
#   submitted for the given shard.
# - head_collation_period: period number of the head collation in the given shard, e.g., if
#   a collation which is added in period P in shard 3 receives enough votes, then
#   head_collation_period[3] is set to P.
collation_records: public({
    chunk_root: bytes32,
    proposer: address,
    is_elected: bool
}[int128][int128])
records_updated_period: public(int128[int128])
head_collation_period: public(int128[int128])

# Notarization
# - current_vote: per-shard voting state of the collation in the current period.
#   First 31 bytes: bitfield of which notary has voted and which has not. The first
#   (most significant) bit represents the vote of the notary with index 0 in
#   get_member_of_committee, the second bit the vote of the notary with index 1,
#   and so on. The last byte holds the running vote count (see get_vote_count).
current_vote: public(bytes32[int128])


#
# Configuration Parameters
#

# The total number of shards within a network.
# Provisionally SHARD_COUNT := 100 for the phase 1 testnet.
SHARD_COUNT: int128

# The period of time, denominated in main chain block times, during which
# a collation tree can be extended by one collation.
# Provisionally PERIOD_LENGTH := 5, approximately 75 seconds.
PERIOD_LENGTH: int128

# The lookahead time, denominated in periods, for eligible collators to
# perform windback and select proposals.
# Provisionally LOOKAHEAD_LENGTH := 4, approximately 5 minutes.
LOOKAHEAD_LENGTH: int128

# The number of notaries to select from the notary pool for each shard in each period.
COMMITTEE_SIZE: int128

# The threshold (number of notaries in the committee) for a proposal to be deemed accepted.
QUORUM_SIZE: int128

# The fixed-size deposit, denominated in ETH, required for registration.
# Provisionally COLLATOR_DEPOSIT := 1000 and PROPOSER_DEPOSIT := 1.
NOTARY_DEPOSIT: wei_value

# The amount of time, denominated in periods, a deposit is locked up from the
# time of deregistration.
# Provisionally COLLATOR_LOCKUP_LENGTH := 16128, approximately two weeks, and
# PROPOSER_LOCKUP_LENGTH := 48, approximately one hour.
NOTARY_LOCKUP_LENGTH: int128


# Constructor: stores the sharding configuration parameters (documented in the
# "Configuration Parameters" section above) chosen at deployment time.
@public
def __init__(
        _SHARD_COUNT: int128,
        _PERIOD_LENGTH: int128,
        _LOOKAHEAD_LENGTH: int128,
        _COMMITTEE_SIZE: int128,
        _QUORUM_SIZE: int128,
        _NOTARY_DEPOSIT: wei_value,
        _NOTARY_LOCKUP_LENGTH: int128,
    ):
    self.SHARD_COUNT = _SHARD_COUNT
    self.PERIOD_LENGTH = _PERIOD_LENGTH
    self.LOOKAHEAD_LENGTH = _LOOKAHEAD_LENGTH
    self.COMMITTEE_SIZE = _COMMITTEE_SIZE
    self.QUORUM_SIZE = _QUORUM_SIZE
    self.NOTARY_DEPOSIT = _NOTARY_DEPOSIT
    self.NOTARY_LOCKUP_LENGTH = _NOTARY_LOCKUP_LENGTH


# Checks if the empty-slots stack is empty, i.e. its top index is 0
# (no vacated notary-pool slots are available for reuse).
@private
def is_empty_slots_stack_empty() -> bool:
    return (self.empty_slots_stack_top == 0)


# Pushes one vacated notary-pool slot index onto empty_slots_stack
# and advances the stack top.
@private
def empty_slots_stack_push(index: int128):
    self.empty_slots_stack[self.empty_slots_stack_top] = index
    self.empty_slots_stack_top += 1


# Pops one slot index off empty_slots_stack.
# Returns -1 (a sentinel, not a valid pool index) when the stack is empty.
@private
def empty_slots_stack_pop() -> int128:
    if self.is_empty_slots_stack_empty():
        return -1
    self.empty_slots_stack_top -= 1
    return self.empty_slots_stack[self.empty_slots_stack_top]


# Helper function to read a notary's entry in notary_registry.
# Returns (deregistered period, pool index) for the given notary address;
# both default to 0 for addresses with no registry entry.
@public
@constant
def get_notary_info(notary_address: address) -> (int128, int128):
    return (self.notary_registry[notary_address].deregistered, self.notary_registry[notary_address].pool_index)


# Update notary_sample_size: promote the queued next-period sample size to be the
# current period's sample size. Only the first call in each period takes effect;
# later calls in the same period are no-ops returning False.
@public
def update_notary_sample_size() -> bool:
    current_period: int128 = floor(block.number / self.PERIOD_LENGTH)
    # Already updated in this (or a later-recorded) period - nothing to do.
    if self.notary_sample_size_updated_period >= current_period:
        return False

    self.current_period_notary_sample_size = self.next_period_notary_sample_size
    self.notary_sample_size_updated_period = current_period

    return True


# Adds an entry to notary_registry, updates the notary pool (notary_pool, notary_pool_len, etc.),
# locks a deposit of size NOTARY_DEPOSIT, and returns True on success.
# Reverts if the sent value is below NOTARY_DEPOSIT or the sender is already registered.
# NOTE: the full msg.value (not just NOTARY_DEPOSIT) is recorded as the deposit.
@public
@payable
def register_notary() -> bool:
    assert msg.value >= self.NOTARY_DEPOSIT
    assert not self.does_notary_exist[msg.sender]

    # Update notary_sample_size before the pool is mutated below.
    self.update_notary_sample_size()

    # Add the notary to the notary pool: reuse a vacated slot if one is
    # available, otherwise append at the end of the pool.
    pool_index: int128 = self.notary_pool_len
    if not self.is_empty_slots_stack_empty():
        pool_index = self.empty_slots_stack_pop()
    self.notary_pool[pool_index] = msg.sender
    self.notary_pool_len += 1

    # If index is larger than notary_sample_size, expand notary_sample_size in next period.
    if pool_index >= self.next_period_notary_sample_size:
        self.next_period_notary_sample_size = pool_index + 1

    # Add the notary to the notary registry (deregistered == 0 means "active").
    self.notary_registry[msg.sender] = {
        deregistered: 0,
        pool_index: pool_index,
        deposit: msg.value,
    }
    self.does_notary_exist[msg.sender] = True

    log.RegisterNotary(pool_index, msg.sender)

    return True


# Sets the deregistered period in the notary_registry entry, updates the notary pool (notary_pool, notary_pool_len, etc.),
# and returns True on success. The deposit stays locked until release_notary()
# succeeds after NOTARY_LOCKUP_LENGTH periods have passed.
@public
def deregister_notary() -> bool:
    assert self.does_notary_exist[msg.sender] == True

    # Update notary_sample_size before the pool is mutated below.
    self.update_notary_sample_size()

    # Delete entry in notary pool and remember the vacated slot for reuse.
    index_in_notary_pool: int128 = self.notary_registry[msg.sender].pool_index
    self.empty_slots_stack_push(index_in_notary_pool)
    self.notary_pool[index_in_notary_pool] = None
    self.notary_pool_len -= 1

    # Set deregistered period to current period (nonzero marks "deregistered").
    self.notary_registry[msg.sender].deregistered = floor(block.number / self.PERIOD_LENGTH)

    log.DeregisterNotary(index_in_notary_pool, msg.sender, self.notary_registry[msg.sender].deregistered)

    return True


# Removes an entry from notary_registry, releases the notary deposit, and returns True on success.
# Reverts unless the notary has previously deregistered and more than
# NOTARY_LOCKUP_LENGTH periods have elapsed since then.
@public
def release_notary() -> bool:
    assert self.does_notary_exist[msg.sender] == True
    # deregistered == 0 means the notary never deregistered (see notary_registry).
    assert self.notary_registry[msg.sender].deregistered != 0
    assert floor(block.number / self.PERIOD_LENGTH) > self.notary_registry[msg.sender].deregistered + self.NOTARY_LOCKUP_LENGTH

    pool_index: int128 = self.notary_registry[msg.sender].pool_index
    deposit: wei_value = self.notary_registry[msg.sender].deposit
    # Delete entry in notary registry before sending the funds back.
    self.notary_registry[msg.sender] = {
        deregistered: 0,
        pool_index: 0,
        deposit: 0,
    }
    self.does_notary_exist[msg.sender] = False

    send(msg.sender, deposit)

    log.ReleaseNotary(pool_index, msg.sender)

    return True


# Given shard_id and index, return the chosen notary in the current period.
# The member is sampled pseudo-randomly from the notary pool, seeded by the hash
# of the last block of the previous period together with (shard_id, index).
@public
@constant
def get_member_of_committee(
        shard_id: int128,
        index: int128,
    ) -> address:
    # Check that shard_id is valid
    assert shard_id >= 0 and shard_id < self.SHARD_COUNT
    period: int128 = floor(block.number / self.PERIOD_LENGTH)

    # Decide notary pool length based on if notary sample size is updated.
    # If update_notary_sample_size() has not run yet this period, the size meant
    # for this period still lives in next_period_notary_sample_size.
    sample_size: int128
    if self.notary_sample_size_updated_period < period:
        sample_size = self.next_period_notary_sample_size
    elif self.notary_sample_size_updated_period == period:
        sample_size = self.current_period_notary_sample_size

    # Block hash used as entropy is the latest block of previous period
    entropy_block_number: int128 = period * self.PERIOD_LENGTH - 1

    # Hash (entropy, shard_id, index) and reduce modulo sample_size to pick a slot.
    # NOTE(review): assumes sample_size > 0 (non-empty pool) - confirm the
    # behavior with an empty pool.
    sampled_index: int128 = convert(
        convert(
            sha3(
                concat(
                    blockhash(entropy_block_number),
                    convert(shard_id, "bytes32"),
                    convert(index, "bytes32"),
                )
            ),
            "uint256",
        ) % convert(sample_size, "uint256"),
        'int128',
    )
    return self.notary_pool[sampled_index]


# Attempts to process a collation header, returns True on success, reverts on failure.
# At most one header per shard per period is accepted; accepting a header resets
# the shard's vote bitfield for the new period.
@public
def add_header(
        shard_id: int128,
        period: int128,
        chunk_root: bytes32
    ) -> bool:

    # Check that shard_id is valid
    assert shard_id >= 0 and shard_id < self.SHARD_COUNT
    # Check that it's current period
    current_period: int128 = floor(block.number / self.PERIOD_LENGTH)
    assert current_period == period
    # Check that no header is added yet in this period in this shard
    assert self.records_updated_period[shard_id] < period

    # Update notary_sample_size
    self.update_notary_sample_size()

    # Add header; is_elected starts False and (per collation_records' doc) marks
    # whether the collation later gathers enough votes.
    self.collation_records[shard_id][period] = {
        chunk_root: chunk_root,
        proposer: msg.sender,
        is_elected: False,
    }

    # Update records_updated_period
    self.records_updated_period[shard_id] = current_period

    # Clear previous vote count
    self.current_vote[shard_id] = None

    # Emit log
    log.AddHeader(
        period,
        shard_id,
        chunk_root,
    )

    return True


# Helper function to read the current vote count of a shard.
@public
@constant
def get_vote_count(shard_id: int128) -> int128:
    # The running vote count is packed into the lowest byte of the vote
    # bitfield, so masking with 0xff extracts it.
    vote_bits: uint256 = convert(self.current_vote[shard_id], 'uint256')
    return convert(
        bitwise_and(vote_bits, convert(255, 'uint256')),
        'int128',
    )


# Helper function to check whether the committee member at `index` has
# already voted on the current header of `shard_id`.
@public
@constant
def has_notary_voted(shard_id: int128, index: int128) -> bool:
    # Bit (255 - index) of the vote bitfield records whether committee member
    # `index` has voted; mask it out with a shifted 1.
    current_vote_in_uint: uint256 = convert(self.current_vote[shard_id], 'uint256')
    # Double negation converts the nonzero/zero mask result into a bool
    return not not bitwise_and(
        current_vote_in_uint,
        shift(convert(1, 'uint256'), 255 - index),
    )


# Internal helper: mark the notary at `index` as having voted on the current
# header of `shard_id`, and bump the 1-byte vote counter.
@private
def update_vote(shard_id: int128, index: int128) -> bool:
    # current_vote layout: bit (255 - index) is the vote flag for committee
    # member `index`; the lowest byte holds the running vote count.
    current_vote_in_uint: uint256 = convert(self.current_vote[shard_id], 'uint256')
    index_in_bitfield: uint256 = shift(convert(1, 'uint256'), 255 - index)
    old_vote_count: int128 = self.get_vote_count(shard_id)

    # Update bitfield
    current_vote_in_uint = bitwise_or(current_vote_in_uint, index_in_bitfield)
    # Update vote count
    # Add an upper bound check to prevent 1-byte vote count overflow
    # (if the cap were hit, the bit is still set but the counter stays at 255;
    # presumably unreachable while COMMITTEE_SIZE <= 255 -- confirm)
    if old_vote_count < 255:
        current_vote_in_uint = current_vote_in_uint + convert(1, 'uint256')
    self.current_vote[shard_id] = convert(current_vote_in_uint, 'bytes32')

    return True


# Notary submit a vote on the collation header recorded for (shard_id, period).
# Reverts unless the sender is the sampled committee member for `index`, the
# header exists and matches `chunk_root`, and the sender has not voted yet.
# Once QUORUM_SIZE votes accumulate, the header becomes the shard's head.
@public
def submit_vote(
        shard_id: int128,
        period: int128,
        chunk_root: bytes32,
        index: int128,
    ) -> bool:

    # Check that shard_id is valid
    assert shard_id >= 0 and shard_id < self.SHARD_COUNT
    # Check that it's current period
    current_period: int128 = floor(block.number / self.PERIOD_LENGTH)
    assert current_period == period
    # Check that index is valid
    assert index >= 0 and index < self.COMMITTEE_SIZE
    # Check that notary is eligible to cast a vote
    assert self.get_member_of_committee(shard_id, index) == msg.sender
    # Check that collation record exists and matches
    assert self.records_updated_period[shard_id] == period
    assert self.collation_records[shard_id][period].chunk_root == chunk_root
    # Check that notary has not yet voted
    assert not self.has_notary_voted(shard_id, index)

    # Update bitfield and vote count
    assert self.update_vote(shard_id, index)

    # Check if we have enough vote and make update accordingly.
    # `is_elected` guards against repeating the head update once quorum is met.
    current_vote_count: int128 = self.get_vote_count(shard_id)
    if current_vote_count >= self.QUORUM_SIZE and \
        not self.collation_records[shard_id][period].is_elected:
        self.head_collation_period[shard_id] = period
        self.collation_records[shard_id][period].is_elected = True

    # Emit log
    log.SubmitVote(
        period,
        shard_id,
        chunk_root,
        msg.sender,
    )

    return True


================================================
FILE: sharding/contracts/utils/__init__.py
================================================


================================================
FILE: sharding/contracts/utils/config.py
================================================
from typing import (
    Any,
    Dict,
)
from eth_utils import (
    to_wei,
)

from evm.utils import (
    env,
)


def get_sharding_config() -> Dict[str, Any]:
    """Return the sharding configuration.

    Each value can be overridden through an environment variable named
    ``SHARDING_<KEY>``; otherwise the listed default applies.
    """
    defaults = (
        ('SHARD_COUNT', 100),
        ('PERIOD_LENGTH', 100),
        ('LOOKAHEAD_LENGTH', 4),
        ('COMMITTEE_SIZE', 135),
        ('QUORUM_SIZE', 90),
        ('NOTARY_DEPOSIT', to_wei('1000', 'ether')),
        ('NOTARY_LOCKUP_LENGTH', 16128),
        ('NOTARY_REWARD', to_wei('0.001', 'ether')),
        ('GAS_PRICE', 1),
    )
    return {
        name: env.get('SHARDING_' + name, type=int, default=default)
        for name, default in defaults
    }


================================================
FILE: sharding/contracts/utils/smc_utils.py
================================================
import json
import os

from typing import (
    Any,
    Dict,
)


# Absolute directory of this module; used to resolve the contract files below.
DIR = os.path.dirname(__file__)


def get_smc_source_code() -> str:
    """Read and return the Vyper source code of the sharding manager contract."""
    file_path = os.path.join(DIR, '../sharding_manager.v.py')
    # Use a context manager so the file handle is closed deterministically
    # (the previous `open(...).read()` leaked the handle).
    with open(file_path) as source_file:
        return source_file.read()


def get_smc_json() -> Dict[str, Any]:
    """Read and parse the compiled sharding manager contract (ABI + bytecode)."""
    file_path = os.path.join(DIR, '../sharding_manager.json')
    # Context manager closes the handle; json.load parses directly from it
    # (the previous `open(...).read()` leaked the handle).
    with open(file_path) as json_file:
        return json.load(json_file)


================================================
FILE: sharding/handler/__init__.py
================================================


================================================
FILE: sharding/handler/exceptions.py
================================================
class LogParsingError(Exception):
    """Raised when an emitted SMC log cannot be parsed against the contract ABI."""
    pass


================================================
FILE: sharding/handler/log_handler.py
================================================
import logging
from typing import (
    Any,
    Dict,
    List,
    Union,
)

from evm.exceptions import BlockNotFound

from web3 import Web3

from eth_typing import (
    Address,
)


class LogHandler:
    """Helper for fetching event logs through a web3 provider.

    Knows the sharding ``period_length`` so that, when no starting block is
    supplied, searches default to the first block of the current period.
    """

    logger = logging.getLogger("sharding.handler.LogHandler")

    def __init__(self, w3: Web3, period_length: int) -> None:
        self.w3 = w3
        self.period_length = period_length

    def get_logs(self,
                 address: Address=None,
                 topics: List[Union[str, None]]=None,
                 from_block: Union[int, str]=None,
                 to_block: Union[int, str]=None) -> List[Dict[str, Any]]:
        """Return logs matching the given filter, clamped to existing blocks.

        :raises BlockNotFound: if ``from_block`` is past the chain head.
        """
        latest_block_number = self.w3.eth.blockNumber

        if from_block is None:
            # Default: search from the first block of the current period.
            start_block = latest_block_number - (latest_block_number % self.period_length)
        elif from_block > latest_block_number:
            raise BlockNotFound(
                "Try to search from block number {} while current block number is {}".format(
                    from_block,
                    latest_block_number
                )
            )
        else:
            start_block = from_block

        # Never ask the node for blocks beyond the current head.
        end_block = 'latest' if to_block is None else min(latest_block_number, to_block)

        filter_params = {
            'address': address,
            'topics': topics,
            'fromBlock': start_block,
            'toBlock': end_block,
        }  # type: Dict[str, Any]
        return self.w3.eth.getLogs(filter_params)


================================================
FILE: sharding/handler/shard_tracker.py
================================================
from web3 import Web3

from typing import (
    Any,
    Dict,
    Generator,
    List,
    Optional,
    Union,
    Tuple,
)

from eth_utils import (
    encode_hex,
    to_list,
    is_address,
)
from eth_typing import (
    Address,
)

from sharding.contracts.utils.config import (
    get_sharding_config,
)
from sharding.handler.log_handler import (
    LogHandler,
)
from sharding.handler.utils.log_parser import LogParser
from sharding.handler.utils.shard_tracker_utils import (
    to_log_topic_address,
    get_event_signature_from_abi,
)


class ShardTracker:
    """Track emitted logs of specific shard.

    Queries the sharding manager contract for its emitted events -- notary
    registry changes, added collation headers, and submitted votes -- filtered
    by shard id or notary address, and parses raw logs into ``LogParser``
    objects.  The repetitive public getters delegate to shared private
    helpers to avoid the previous five-way code duplication.
    """

    def __init__(self,
                 w3: Web3,
                 config: Optional[Dict[str, Any]],
                 shard_id: int,
                 smc_handler_address: Address) -> None:
        # Fall back to the default sharding configuration when none is given.
        if config is None:
            self.config = get_sharding_config()
        else:
            self.config = config
        self.shard_id = shard_id
        self.log_handler = LogHandler(w3, self.config['PERIOD_LENGTH'])
        self.smc_handler_address = smc_handler_address

    def _get_logs_by_shard_id(self,
                              event_name: str,
                              from_block: Union[int, str]=None,
                              to_block: Union[int, str]=None) -> List[Dict[str, Any]]:
        """Search logs by the shard id.
        """
        return self.log_handler.get_logs(
            address=self.smc_handler_address,
            topics=[
                encode_hex(get_event_signature_from_abi(event_name)),
                encode_hex(self.shard_id.to_bytes(32, byteorder='big')),
            ],
            from_block=from_block,
            to_block=to_block,
        )

    def _get_logs_by_notary(self,
                            event_name: str,
                            notary: Union[str, None],
                            from_block: Union[int, str]=None,
                            to_block: Union[int, str]=None) -> List[Dict[str, Any]]:
        """Search logs by notary address.

        NOTE: The notary address provided must be padded to 32 bytes
        and also hex-encoded. If notary address provided
        is `None`, it will return all logs related to the event.
        """
        return self.log_handler.get_logs(
            address=self.smc_handler_address,
            topics=[
                encode_hex(get_event_signature_from_abi(event_name)),
                notary,
            ],
            from_block=from_block,
            to_block=to_block,
        )

    def _decide_period_block_number(self,
                                    from_period: Union[int, None],
                                    to_period: Union[int, None]
                                    ) -> Tuple[Union[int, None], Union[int, None]]:
        """Map an inclusive period range to an inclusive block range.

        ``None`` endpoints are passed through so ``LogHandler`` applies its
        own defaults.
        """
        if from_period is None:
            from_block = None
        else:
            from_block = from_period * self.config['PERIOD_LENGTH']

        if to_period is None:
            to_block = None
        else:
            to_block = (to_period + 1) * self.config['PERIOD_LENGTH'] - 1

        return from_block, to_block

    #
    # Internal helpers shared by the public getters below
    #
    @to_list
    def _get_parsed_logs_by_notary(self,
                                   event_name: str,
                                   from_period: int=None,
                                   to_period: int=None
                                   ) -> Generator[LogParser, None, None]:
        """Yield parsed logs of a notary-indexed event in the period range."""
        from_block, to_block = self._decide_period_block_number(from_period, to_period)
        logs = self._get_logs_by_notary(
            event_name,
            notary=None,
            from_block=from_block,
            to_block=to_block,
        )
        for log in logs:
            yield LogParser(event_name=event_name, log=log)

    @to_list
    def _get_parsed_logs_by_shard_id(self,
                                     event_name: str,
                                     from_period: int=None,
                                     to_period: int=None
                                     ) -> Generator[LogParser, None, None]:
        """Yield parsed logs of a shard-indexed event in the period range."""
        from_block, to_block = self._decide_period_block_number(from_period, to_period)
        logs = self._get_logs_by_shard_id(
            event_name,
            from_block=from_block,
            to_block=to_block,
        )
        for log in logs:
            yield LogParser(event_name=event_name, log=log)

    def _has_notary_log(self,
                        event_name: str,
                        notary: str,
                        from_period: int=None) -> bool:
        """Return True iff ``notary`` emitted ``event_name`` since ``from_period``."""
        assert is_address(notary)
        from_block, _ = self._decide_period_block_number(from_period, None)
        log = self._get_logs_by_notary(
            event_name,
            notary=to_log_topic_address(notary),
            from_block=from_block,
        )
        return bool(log)

    def _get_shard_logs_in_period(self,
                                  event_name: str,
                                  period: int) -> List[Dict[str, Any]]:
        """Return raw logs of ``event_name`` emitted within the given period."""
        return self._get_logs_by_shard_id(
            event_name,
            from_block=period * self.config['PERIOD_LENGTH'],
            to_block=(period + 1) * self.config['PERIOD_LENGTH'] - 1,
        )

    #
    # Basic functions to get emitted logs
    #
    def get_register_notary_logs(self,
                                 from_period: int=None,
                                 to_period: int=None) -> List[LogParser]:
        return self._get_parsed_logs_by_notary('RegisterNotary', from_period, to_period)

    def get_deregister_notary_logs(self,
                                   from_period: int=None,
                                   to_period: int=None) -> List[LogParser]:
        return self._get_parsed_logs_by_notary('DeregisterNotary', from_period, to_period)

    def get_release_notary_logs(self,
                                from_period: int=None,
                                to_period: int=None) -> List[LogParser]:
        return self._get_parsed_logs_by_notary('ReleaseNotary', from_period, to_period)

    def get_add_header_logs(self,
                            from_period: int=None,
                            to_period: int=None) -> List[LogParser]:
        return self._get_parsed_logs_by_shard_id('AddHeader', from_period, to_period)

    def get_submit_vote_logs(self,
                             from_period: int=None,
                             to_period: int=None) -> List[LogParser]:
        return self._get_parsed_logs_by_shard_id('SubmitVote', from_period, to_period)

    #
    # Functions for user to check the status of registration or votes
    #
    def is_notary_registered(self, notary: str, from_period: int=None) -> bool:
        return self._has_notary_log('RegisterNotary', notary, from_period)

    def is_notary_deregistered(self, notary: str, from_period: int=None) -> bool:
        return self._has_notary_log('DeregisterNotary', notary, from_period)

    def is_notary_released(self, notary: str, from_period: int=None) -> bool:
        return self._has_notary_log('ReleaseNotary', notary, from_period)

    def is_new_header_added(self, period: int) -> bool:
        # True iff a header was added to this shard in the specified period.
        return bool(self._get_shard_logs_in_period('AddHeader', period))

    def has_enough_vote(self, period: int) -> bool:
        # True iff the votes submitted in the period reach the quorum size.
        logs = self._get_shard_logs_in_period('SubmitVote', period)
        return len(logs) >= self.config['QUORUM_SIZE']


================================================
FILE: sharding/handler/smc_handler.py
================================================
import logging
from typing import (
    Any,
    Dict,
    Iterable,
    List,
    Tuple,
)

from web3.contract import (
    Contract,
)
from eth_utils import (
    decode_hex,
    to_canonical_address,
)

from sharding.handler.utils.smc_handler_utils import (
    make_call_context,
    make_transaction_context,
)
from sharding.contracts.utils.smc_utils import (
    get_smc_json,
)

from eth_keys import (
    datatypes,
)
from eth_typing import (
    Address,
    Hash32,
)


smc_json = get_smc_json()


class SMC(Contract):
    """web3 ``Contract`` wrapper for the Sharding Manager Contract (SMC).

    Provides typed getter helpers for the contract's public variables and
    transaction helpers that sign locally with an ``eth_keys`` private key
    and broadcast via ``sendRawTransaction``.
    """

    logger = logging.getLogger("sharding.SMC")
    abi = smc_json["abi"]
    bytecode = decode_hex(smc_json["bytecode"])

    # Key/address used to sign and send when the caller does not pass one.
    default_priv_key = None  # type: datatypes.PrivateKey
    default_sender_address = None  # type: Address
    config = None  # type: Dict[str, Any]

    # Gas estimate per contract function, taken from the compiled ABI.
    _estimate_gas_dict = {
        entry['name']: entry['gas']
        for entry in smc_json["abi"]
        if entry['type'] == 'function'
    }  # type: Dict[str, int]

    def __init__(self,
                 *args: Any,
                 default_priv_key: datatypes.PrivateKey,
                 config: Dict[str, Any],
                 **kwargs: Any) -> None:
        """Initialize the wrapper.

        :param default_priv_key: key used to sign transactions by default.
        :param config: sharding config dict (see ``get_sharding_config``).
        Remaining arguments are forwarded to ``web3.contract.Contract``.
        """
        self.default_priv_key = default_priv_key
        self.default_sender_address = self.default_priv_key.public_key.to_canonical_address()
        self.config = config

        super().__init__(*args, **kwargs)

    #
    # property
    #
    @property
    def basic_call_context(self) -> Dict[str, Any]:
        # Minimal call context containing only the default sender address.
        return make_call_context(
            sender_address=self.default_sender_address,
        )

    #
    # Public variable getter functions
    #
    def does_notary_exist(self, notary_address: Address) -> bool:
        return self.functions.does_notary_exist(notary_address).call(self.basic_call_context)

    def get_notary_info(self, notary_address: Address) -> Tuple[int, int]:
        return self.functions.get_notary_info(notary_address).call(self.basic_call_context)

    def notary_pool_len(self) -> int:
        return self.functions.notary_pool_len().call(self.basic_call_context)

    def notary_pool(self, pool_index: int) -> List[Address]:
        notary_address = self.functions.notary_pool(pool_index).call(self.basic_call_context)
        return to_canonical_address(notary_address)

    def empty_slots_stack_top(self) -> int:
        return self.functions.empty_slots_stack_top().call(self.basic_call_context)

    def empty_slots_stack(self, stack_index: int) -> List[int]:
        return self.functions.empty_slots_stack(stack_index).call(self.basic_call_context)

    def current_period_notary_sample_size(self) -> int:
        return self.functions.current_period_notary_sample_size().call(self.basic_call_context)

    def next_period_notary_sample_size(self) -> int:
        return self.functions.next_period_notary_sample_size().call(self.basic_call_context)

    def notary_sample_size_updated_period(self) -> int:
        return self.functions.notary_sample_size_updated_period().call(self.basic_call_context)

    def records_updated_period(self, shard_id: int) -> int:
        return self.functions.records_updated_period(shard_id).call(self.basic_call_context)

    def head_collation_period(self, shard_id: int) -> int:
        return self.functions.head_collation_period(shard_id).call(self.basic_call_context)

    def get_member_of_committee(self, shard_id: int, index: int) -> Address:
        notary_address = self.functions.get_member_of_committee(
            shard_id,
            index,
        ).call(self.basic_call_context)
        return to_canonical_address(notary_address)

    def get_collation_chunk_root(self, shard_id: int, period: int) -> Hash32:
        return self.functions.collation_records__chunk_root(
            shard_id,
            period,
        ).call(self.basic_call_context)

    def get_collation_proposer(self, shard_id: int, period: int) -> Address:
        proposer_address = self.functions.collation_records__proposer(
            shard_id,
            period,
        ).call(self.basic_call_context)
        return to_canonical_address(proposer_address)

    def get_collation_is_elected(self, shard_id: int, period: int) -> bool:
        return self.functions.collation_records__is_elected(
            shard_id,
            period,
        ).call(self.basic_call_context)

    def current_vote(self, shard_id: int) -> bytes:
        return self.functions.current_vote(
            shard_id,
        ).call(self.basic_call_context)

    def get_vote_count(self, shard_id: int) -> int:
        return self.functions.get_vote_count(
            shard_id,
        ).call(self.basic_call_context)

    def has_notary_voted(self, shard_id: int, index: int) -> bool:
        return self.functions.has_notary_voted(
            shard_id,
            index,
        ).call(self.basic_call_context)

    def _send_transaction(self,
                          *,
                          func_name: str,
                          args: Iterable[Any],
                          private_key: datatypes.PrivateKey=None,
                          nonce: int=None,
                          chain_id: int=None,
                          gas: int=None,
                          value: int=0,
                          gas_price: int=None,
                          data: bytes=None) -> Hash32:
        """Build, sign, and broadcast a transaction calling ``func_name``.

        :param func_name: name of the contract function to invoke.
        :param args: positional arguments for the contract function.
        :param private_key: signing key; defaults to ``default_priv_key``.
        :param nonce: sender nonce; fetched from the node when not given.
        :return: the transaction hash.
        """
        if gas_price is None:
            gas_price = self.config['GAS_PRICE']
        if private_key is None:
            # BUGFIX: fall back to the default signing key.  This previously
            # read ``self.private_key``, an attribute that does not exist on
            # this class, so any call without an explicit key raised
            # AttributeError.
            private_key = self.default_priv_key
        if nonce is None:
            nonce = self.web3.eth.getTransactionCount(private_key.public_key.to_checksum_address())
        build_transaction_detail = make_transaction_context(
            nonce=nonce,
            gas=gas,
            chain_id=chain_id,
            value=value,
            gas_price=gas_price,
            data=data,
        )
        func_instance = getattr(self.functions, func_name)
        unsigned_transaction = func_instance(*args).buildTransaction(
            transaction=build_transaction_detail,
        )
        signed_transaction_dict = self.web3.eth.account.signTransaction(
            unsigned_transaction,
            private_key.to_hex(),
        )
        tx_hash = self.web3.eth.sendRawTransaction(signed_transaction_dict['rawTransaction'])
        return tx_hash

    #
    # Transactions
    #
    def register_notary(self,
                        private_key: datatypes.PrivateKey=None,
                        gas_price: int=None) -> Hash32:
        """Register the sender as a notary, attaching the required deposit."""
        gas = self._estimate_gas_dict['register_notary']
        tx_hash = self._send_transaction(
            func_name='register_notary',
            args=[],
            private_key=private_key,
            value=self.config['NOTARY_DEPOSIT'],
            gas=gas,
            gas_price=gas_price,
        )
        return tx_hash

    def deregister_notary(self,
                          private_key: datatypes.PrivateKey=None,
                          gas_price: int=None) -> Hash32:
        """Start the sender's deregistration (deposit stays locked for now)."""
        gas = self._estimate_gas_dict['deregister_notary']
        tx_hash = self._send_transaction(
            func_name='deregister_notary',
            args=[],
            private_key=private_key,
            gas=gas,
            gas_price=gas_price,
        )
        return tx_hash

    def release_notary(self,
                       private_key: datatypes.PrivateKey=None,
                       gas_price: int=None) -> Hash32:
        """Withdraw the sender's deposit after the lockup period has passed."""
        gas = self._estimate_gas_dict['release_notary']
        tx_hash = self._send_transaction(
            func_name='release_notary',
            args=[],
            private_key=private_key,
            gas=gas,
            gas_price=gas_price,
        )
        return tx_hash

    def add_header(self,
                   *,
                   shard_id: int,
                   period: int,
                   chunk_root: Hash32,
                   private_key: datatypes.PrivateKey=None,
                   gas_price: int=None) -> Hash32:
        """Propose a collation header for (shard_id, period)."""
        gas = self._estimate_gas_dict['add_header']
        tx_hash = self._send_transaction(
            func_name='add_header',
            args=[
                shard_id,
                period,
                chunk_root,
            ],
            private_key=private_key,
            gas=gas,
            gas_price=gas_price,
        )
        return tx_hash

    def submit_vote(self,
                    *,
                    shard_id: int,
                    period: int,
                    chunk_root: Hash32,
                    index: int,
                    private_key: datatypes.PrivateKey=None,
                    gas_price: int=None) -> Hash32:
        """Cast a committee vote on the header recorded for (shard_id, period)."""
        gas = self._estimate_gas_dict['submit_vote']
        tx_hash = self._send_transaction(
            func_name='submit_vote',
            args=[
                shard_id,
                period,
                chunk_root,
                index,
            ],
            private_key=private_key,
            gas=gas,
            gas_price=gas_price,
        )
        return tx_hash


================================================
FILE: sharding/handler/utils/__init__.py
================================================


================================================
FILE: sharding/handler/utils/log_parser.py
================================================
from typing import (
    Any,
    Dict,
    List,
    Tuple,
    Union,
)

from eth_utils import (
    to_canonical_address,
    decode_hex,
    big_endian_to_int,
)
from eth_typing import (
    Address,
)

from sharding.contracts.utils.smc_utils import (
    get_smc_json,
)
from sharding.handler.exceptions import (
    LogParsingError,
)


class LogParser(object):
    """Parse one raw SMC event log into attributes on this object.

    Looks up ``event_name`` in the contract ABI, splits its inputs into
    indexed (topic) and non-indexed (data) fields, decodes each value
    according to its ABI type, and sets it as an attribute named after
    the field.
    """

    def __init__(self, *, event_name: str, log: Dict[str, Any]) -> None:
        event_abi = self._extract_event_abi(event_name=event_name)

        # Indexed inputs arrive as log topics; the rest are packed in `data`.
        indexed = [
            (item['name'], item['type'])
            for item in event_abi['inputs']
            if item['indexed'] is True
        ]
        unindexed = [
            (item['name'], item['type'])
            for item in event_abi['inputs']
            if item['indexed'] is not True
        ]

        self._set_topic_value(topics=indexed, log=log)
        self._set_data_value(data=unindexed, log=log)

    def _extract_event_abi(self, *, event_name: str) -> Dict[str, Any]:
        # Scan the contract ABI for an event entry with the requested name.
        for entry in get_smc_json()['abi']:
            if entry['name'] == event_name and entry['type'] == 'event':
                return entry
        raise LogParsingError("Can not find event {}".format(event_name))

    def _set_topic_value(self, *, topics: List[Tuple[str, Any]], log: Dict[str, Any]) -> None:
        # topics[0] is the event signature; the rest carry the indexed values.
        actual_topics = log['topics'][1:]
        if len(actual_topics) != len(topics):
            raise LogParsingError(
                "Error parsing log topics, expect"
                "{} topics but get {}.".format(len(topics), len(actual_topics))
            )
        for (name, type_), raw_value in zip(topics, actual_topics):
            setattr(self, name, self._parse_value(val_type=type_, val=raw_value))

    def _set_data_value(self, *, data: List[Tuple[str, Any]], log: Dict[str, Any]) -> None:
        # Non-indexed values are ABI-packed into consecutive 32-byte words.
        data_bytes = decode_hex(log['data'])
        if len(data_bytes) != len(data) * 32:
            raise LogParsingError(
                "Error parsing log data, expect"
                "{} data but get {}.".format(len(data), len(data_bytes))
            )
        for offset, (name, type_) in enumerate(data):
            word = data_bytes[offset * 32: (offset + 1) * 32]
            setattr(self, name, self._parse_value(val_type=type_, val=word))

    def _parse_value(self, *, val_type: str, val: bytes) -> Union[bool, Address, bytes, int]:
        if val_type == 'bool':
            return bool(big_endian_to_int(val))
        if val_type == 'address':
            # An address is right-aligned inside the 32-byte word.
            return to_canonical_address(val[-20:])
        if val_type == 'bytes32':
            return val
        if 'int' in val_type:
            return big_endian_to_int(val)
        raise LogParsingError(
            "Error parsing the type of given value. Expect bool/address/bytes32/int*"
            "but get {}.".format(val_type)
        )


================================================
FILE: sharding/handler/utils/shard_tracker_utils.py
================================================
from typing import (
    Union,
)

from eth_utils import (
    event_abi_to_log_topic,
    to_checksum_address,
)
from eth_typing import (
    Address,
)

from sharding.contracts.utils.smc_utils import (
    get_smc_json,
)


def to_log_topic_address(address: Union[Address, str]) -> str:
    return '0x' + to_checksum_address(address)[2:].rjust(64, '0')


def get_event_signature_from_abi(event_name: str) -> bytes:
    for function in get_smc_json()['abi']:
        if function['name'] == event_name and function['type'] == 'event':
            return event_abi_to_log_topic(function)
    raise ValueError("Event with name {} not found".format(event_name))


================================================
FILE: sharding/handler/utils/smc_handler_utils.py
================================================
from typing import (
    Any,
    Generator,
    Tuple,
)

from eth_utils import (
    is_address,
    to_checksum_address,
    to_dict,
)
from eth_typing import (
    Address,
)


@to_dict
def make_call_context(sender_address: Address,
                      gas: int=None,
                      value: int=None,
                      gas_price: int=None,
                      data: bytes=None) -> Generator[Tuple[str, Any], None, None]:
    """
    Makes the context for message call.

    Yields key/value pairs which ``to_dict`` collects into a dict; only
    the optional parameters that were actually supplied are included.
    """
    if not is_address(sender_address):
        raise ValueError('Message call sender provided is not an address')
    # 'from' is required in eth_tester
    yield 'from', to_checksum_address(sender_address)
    optional_fields = (
        ('gas', gas),
        ('value', value),
        ('gas_price', gas_price),
        ('data', data),
    )
    for key, field_value in optional_fields:
        if field_value is not None:
            yield key, field_value


@to_dict
def make_transaction_context(nonce: int,
                             gas: int,
                             chain_id: int=None,
                             value: int=None,
                             gas_price: int=None,
                             data: bytes=None) -> Generator[Tuple[str, Any], None, None]:
    """
    Makes the context for transaction call.

    :param nonce: sender account nonce; must be a non-negative integer.
    :param gas: gas limit for the transaction; must be a non-negative integer.
    :raises ValueError: if ``nonce`` or ``gas`` is not a non-negative integer.
    """

    if not (isinstance(nonce, int) and nonce >= 0):
        raise ValueError('nonce should be provided as non-negative integer')
    # The check accepts zero, so the message now says "non-negative"
    # (it previously claimed "positive", contradicting the condition).
    if not (isinstance(gas, int) and gas >= 0):
        raise ValueError('gas should be provided as non-negative integer')
    yield 'nonce', nonce
    yield 'gas', gas
    # chainId is always included, even when None -- presumably expected by
    # web3's buildTransaction; verify before making it conditional.
    yield 'chainId', chain_id
    if value is not None:
        yield 'value', value
    if gas_price is not None:
        yield 'gasPrice', gas_price
    if data is not None:
        yield 'data', data


================================================
FILE: sharding/handler/utils/web3_utils.py
================================================
import rlp

from evm.rlp.transactions import (
    BaseTransaction,
)
from web3 import (
    Web3,
)

from eth_utils import (
    to_checksum_address,
)

from typing import (
    List,
    Tuple,
)
from eth_typing import (
    Address,
    Hash32,
)


def get_code(w3: Web3, address: Address) -> bytes:
    """Return the bytecode stored at ``address``."""
    return w3.eth.getCode(to_checksum_address(address))


def get_nonce(w3: Web3, address: Address) -> int:
    """Return the transaction count (nonce) of ``address``."""
    return w3.eth.getTransactionCount(to_checksum_address(address))


def take_snapshot(w3: Web3) -> int:
    """Snapshot the current test-chain state and return the snapshot id."""
    return w3.testing.snapshot()


def revert_to_snapshot(w3: Web3, snapshot_id: int) -> None:
    """Roll the test chain back to the state captured under ``snapshot_id``."""
    # Delegates directly to the eth-tester testing namespace.
    w3.testing.revert(snapshot_id)


def mine(w3: Web3, num_blocks: int) -> None:
    """Mine ``num_blocks`` new blocks on the test chain."""
    # Delegates directly to the eth-tester testing namespace.
    w3.testing.mine(num_blocks)


def send_raw_transaction(w3: Web3, raw_transaction: BaseTransaction) -> Hash32:
    """RLP-encode ``raw_transaction``, broadcast it, and return the tx hash."""
    encoded = rlp.encode(raw_transaction)
    return w3.eth.sendRawTransaction(w3.toHex(encoded))


def get_recent_block_hashes(w3: Web3, history_size: int) -> Tuple[Hash32, ...]:
    """
    Return the hashes of up to ``history_size`` most recent blocks,
    ordered oldest-first (stops early at the genesis block).
    """
    collected = []
    current = w3.eth.getBlock('latest')

    for _ in range(history_size):
        collected.append(current['hash'])
        if current['number'] == 0:
            # Reached genesis -- there is nothing older to walk to.
            break
        current = w3.eth.getBlock(current['parentHash'])

    # Collected newest-first while walking back; flip to oldest-first.
    return tuple(reversed(collected))


def get_canonical_chain(w3: Web3,
                        recent_block_hashes: List[Hash32],
                        history_size: int) -> Tuple[List[Hash32], Tuple[Hash32, ...]]:
    """
    Compare the current canonical chain against ``recent_block_hashes``
    (ordered oldest-first) and return ``(revoked_hashes, new_block_hashes)``:
    the previously-known hashes that are no longer canonical, and the new
    canonical hashes (oldest-first) since the first common ancestor.

    Raises a plain ``Exception`` when no common ancestor is found within
    ``history_size`` blocks of the current head.
    """
    block = w3.eth.getBlock('latest')

    new_block_hashes = []

    # Walk back from the head until we reach a block we already know about.
    for _ in range(history_size):
        if block['hash'] in recent_block_hashes:
            break
        new_block_hashes.append(block['hash'])
        block = w3.eth.getBlock(block['parentHash'])
    else:
        # The loop exhausted `history_size` blocks without hitting a known hash.
        raise Exception('No common ancestor found')

    first_common_ancestor_idx = recent_block_hashes.index(block['hash'])

    # Everything recorded after the common ancestor has been reorged away.
    revoked_hashes = recent_block_hashes[first_common_ancestor_idx + 1:]

    # reverse it to comply with the order of `self.recent_block_hashes`
    reversed_new_block_hashes = tuple(reversed(new_block_hashes))

    return revoked_hashes, reversed_new_block_hashes


================================================
FILE: tests/__init__.py
================================================


================================================
FILE: tests/conftest.py
================================================
import pytest

from web3 import (
    Web3,
)

from web3.providers.eth_tester import (
    EthereumTesterProvider,
)

from eth_tester import (
    EthereumTester,
    PyEVMBackend,
)

from eth_tester.backends.pyevm.main import (
    get_default_account_keys,
)
from sharding.handler.smc_handler import (
    SMC as SMCFactory,
)
from sharding.handler.utils.web3_utils import (
    get_code,
)
from tests.handler.utils.config import (
    get_sharding_testing_config,
)


@pytest.fixture(scope="session")
def smc_testing_config():
    """Session-wide SMC configuration used by the sharding test fixtures."""
    config = get_sharding_testing_config()
    return config


@pytest.fixture
def smc_handler(smc_testing_config):
    """
    Deploy a fresh Sharding Manager Contract (SMC) on an in-memory py-evm
    test chain and return an ``SMC`` handler bound to the deployed address.

    Auto-mining is left DISABLED on the returned chain, so tests must mine
    blocks explicitly (e.g. via ``mine``/``fast_forward``) after sending
    transactions.
    """
    eth_tester = EthereumTester(
        backend=PyEVMBackend(),
        auto_mine_transactions=False,
    )
    provider = EthereumTesterProvider(eth_tester)
    w3 = Web3(provider)
    # Only present on some web3.py versions -- enable it when available.
    if hasattr(w3.eth, "enable_unaudited_features"):
        w3.eth.enable_unaudited_features()

    # Deployment (and handler default) key is the first eth-tester account.
    private_key = get_default_account_keys()[0]

    # deploy smc contract
    SMC = w3.eth.contract(ContractFactoryClass=SMCFactory)
    constructor_kwargs = {
        "_SHARD_COUNT": smc_testing_config["SHARD_COUNT"],
        "_PERIOD_LENGTH": smc_testing_config["PERIOD_LENGTH"],
        "_LOOKAHEAD_LENGTH": smc_testing_config["LOOKAHEAD_LENGTH"],
        "_COMMITTEE_SIZE": smc_testing_config["COMMITTEE_SIZE"],
        "_QUORUM_SIZE": smc_testing_config["QUORUM_SIZE"],
        "_NOTARY_DEPOSIT": smc_testing_config["NOTARY_DEPOSIT"],
        "_NOTARY_LOCKUP_LENGTH": smc_testing_config["NOTARY_LOCKUP_LENGTH"],
    }
    # Temporarily enable auto-mining so the deployment tx is mined at once,
    # then turn it back off for the tests.
    eth_tester.enable_auto_mine_transactions()
    deployment_tx_hash = SMC.constructor(**constructor_kwargs).transact()
    deployment_receipt = w3.eth.waitForTransactionReceipt(deployment_tx_hash, timeout=0)
    eth_tester.disable_auto_mine_transactions()

    # Sanity check: contract code must exist at the deployed address.
    assert get_code(w3, deployment_receipt.contractAddress) != b''
    smc_handler = SMC(
        address=deployment_receipt.contractAddress,
        default_priv_key=private_key,
        config=smc_testing_config,
    )

    return smc_handler


================================================
FILE: tests/contract/__init__.py
================================================


================================================
FILE: tests/contract/test_add_header.py
================================================
from sharding.handler.utils.web3_utils import (
    mine,
)

from tests.contract.utils.common_utils import (
    batch_register,
    fast_forward,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)


def test_normal_add_header(smc_handler):  # noqa: F811
    """
    Happy path: one collation header per shard per period is accepted,
    and headers on different shards/periods are independent of each other.
    """
    w3 = smc_handler.web3

    # Register notary 0~2 and fast forward to next period
    batch_register(smc_handler, 0, 2)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1
    # Check that collation records of shard 0 and shard 1 have not been updated before
    assert smc_handler.records_updated_period(0) == 0
    assert smc_handler.records_updated_period(1) == 0

    # Add a header for shard 0 in period 1.
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Check that collation record of shard 0 has been updated
    assert smc_handler.records_updated_period(0) == 1
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == CHUNK_ROOT_1_0

    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 2

    # Add a header for shard 0 in the new period.
    CHUNK_ROOT_2_0 = b'\x20' * 32
    smc_handler.add_header(
        shard_id=0,
        period=2,
        chunk_root=CHUNK_ROOT_2_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Check that collation record of shard 0 has been updated
    assert smc_handler.records_updated_period(0) == 2
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=2) == CHUNK_ROOT_2_0
    # Check that collation record of shard 1 has never been updated
    assert smc_handler.records_updated_period(1) == 0

    # Shard 1 accepts its own header in period 2, independent of shard 0.
    CHUNK_ROOT_2_1 = b'\x21' * 32
    smc_handler.add_header(
        shard_id=1,
        period=2,
        chunk_root=CHUNK_ROOT_2_1,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Check that collation record of shard 1 has been updated
    assert smc_handler.records_updated_period(1) == 2
    assert smc_handler.get_collation_chunk_root(shard_id=1, period=2) == CHUNK_ROOT_2_1


def test_add_header_wrong_period(smc_handler):  # noqa: F811
    """A header with a wrong period must be rejected and leave records untouched."""
    w3 = smc_handler.web3

    # Register notary 0~2 and move into period 1.
    batch_register(smc_handler, 0, 2)
    fast_forward(smc_handler, 1)
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert period_now == 1

    EMPTY_ROOT = b'\x00' * 32
    ROOT_1_0 = b'\x10' * 32
    # Wrong period: one period in the past.
    stale_tx = smc_handler.add_header(
        shard_id=0,
        period=0,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Shard 0's record must be unchanged and the failed tx must emit no logs.
    assert smc_handler.records_updated_period(0) == 0
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == EMPTY_ROOT
    assert len(w3.eth.getTransactionReceipt(stale_tx)['logs']) == 0

    # Wrong period: one period in the future.
    future_tx = smc_handler.add_header(
        shard_id=0,
        period=2,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Again: record unchanged, no logs emitted.
    assert smc_handler.records_updated_period(0) == 0
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == EMPTY_ROOT
    assert len(w3.eth.getTransactionReceipt(future_tx)['logs']) == 0

    # Submitting with the correct period succeeds.
    smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    assert smc_handler.records_updated_period(0) == 1
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == ROOT_1_0


def test_add_header_wrong_shard(smc_handler):  # noqa: F811
    """A header with an out-of-range shard_id must be rejected."""
    w3 = smc_handler.web3
    shard_count = smc_handler.config['SHARD_COUNT']

    # Register notary 0~2 and move into period 1.
    batch_register(smc_handler, 0, 2)
    fast_forward(smc_handler, 1)
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert period_now == 1

    EMPTY_ROOT = b'\x00' * 32
    ROOT_1_0 = b'\x10' * 32
    # Illegal shard id above the valid range.
    too_big_tx = smc_handler.add_header(
        shard_id=shard_count + 1,
        period=1,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Shard 0's record must be unchanged and the failed tx must emit no logs.
    assert smc_handler.records_updated_period(0) == 0
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == EMPTY_ROOT
    assert len(w3.eth.getTransactionReceipt(too_big_tx)['logs']) == 0

    # Illegal shard id below the valid range.
    negative_tx = smc_handler.add_header(
        shard_id=-1,
        period=1,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # Again: record unchanged, no logs emitted.
    assert smc_handler.records_updated_period(0) == 0
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == EMPTY_ROOT
    assert len(w3.eth.getTransactionReceipt(negative_tx)['logs']) == 0

    # Submitting with a valid shard id succeeds.
    smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    assert smc_handler.records_updated_period(0) == 1
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == ROOT_1_0


def test_double_add_header(smc_handler):  # noqa: F811
    """Only the first header per shard/period is accepted; duplicates fail."""
    w3 = smc_handler.web3

    # Register notary 0~2 and move into period 1.
    batch_register(smc_handler, 0, 2)
    fast_forward(smc_handler, 1)
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert period_now == 1

    ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    # The first header is recorded.
    assert smc_handler.records_updated_period(0) == 1
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == ROOT_1_0

    # Re-submitting the identical header fails: no logs emitted.
    duplicate_tx = smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    assert len(w3.eth.getTransactionReceipt(duplicate_tx)['logs']) == 0

    # Re-submitting with a different chunk root also fails,
    # and the stored record is untouched.
    conflicting_tx = smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=b'\x56' * 32,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3=w3, num_blocks=1)
    assert smc_handler.records_updated_period(0) == 1
    assert smc_handler.get_collation_chunk_root(shard_id=0, period=1) == ROOT_1_0
    assert len(w3.eth.getTransactionReceipt(conflicting_tx)['logs']) == 0


================================================
FILE: tests/contract/test_compile.py
================================================
from vyper import compiler

from sharding.contracts.utils.smc_utils import (
    get_smc_json,
    get_smc_source_code,
)


def test_compile_smc():
    """The checked-in JSON artifact must match a fresh compile of the Vyper source."""
    artifact = get_smc_json()
    source = get_smc_source_code()

    # Recompile from source and compare against the stored artifact.
    fresh_abi = compiler.mk_full_signature(source)
    fresh_bytecode = '0x' + compiler.compile(source).hex()

    assert fresh_abi == artifact["abi"]
    assert fresh_bytecode == artifact["bytecode"]


================================================
FILE: tests/contract/test_log_emission.py
================================================
from sharding.handler.shard_tracker import (  # noqa: F401
    ShardTracker,
)
from sharding.handler.utils.web3_utils import (
    mine,
)

from tests.contract.utils.common_utils import (
    fast_forward,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)
from tests.contract.utils.sample_helper import (
    sampling,
)


def test_log_emission(smc_handler):  # noqa: F811
    """
    Walk one notary through its full lifecycle (register, add_header,
    submit_vote, deregister, release) and check that each SMC action emits
    its log, both when fetched live and when queried by past period range.
    """
    w3 = smc_handler.web3
    shard_tracker = ShardTracker(
        w3=w3,
        config=smc_handler.config,
        shard_id=0,
        smc_handler_address=smc_handler.address,
    )
    notary = NotaryAccount(0)

    # Register
    smc_handler.register_notary(private_key=notary.private_key)
    mine(w3, 1)
    # Check that log was successfully emitted
    log = shard_tracker.get_register_notary_logs()[0]
    assert log.index_in_notary_pool == 0 and log.notary == notary.canonical_address
    fast_forward(smc_handler, 1)

    # Add header
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=0,
        period=1,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=notary.private_key,
    )
    mine(w3, 1)
    # Check that log was successfully emitted
    log = shard_tracker.get_add_header_logs()[0]
    assert log.period == 1 and log.shard_id == 0 and log.chunk_root == CHUNK_ROOT_1_0

    # Submit vote (the voter must be the notary sampled at `sample_index`)
    sample_index = 0
    pool_index = sampling(smc_handler, 0)[sample_index]
    smc_handler.submit_vote(
        shard_id=0,
        period=1,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(pool_index).private_key,
    )
    mine(w3, 1)
    # Check that log was successfully emitted
    log = shard_tracker.get_submit_vote_logs()[0]
    assert log.period == 1 and log.shard_id == 0 and log.chunk_root == CHUNK_ROOT_1_0 and \
        log.notary == NotaryAccount(pool_index).canonical_address
    fast_forward(smc_handler, 1)

    # Deregister
    smc_handler.deregister_notary(private_key=notary.private_key)
    mine(w3, 1)
    # Check that log was successfully emitted
    log = shard_tracker.get_deregister_notary_logs()[0]
    assert log.index_in_notary_pool == 0 and log.notary == notary.canonical_address and \
        log.deregistered_period == 2
    # Fast forward to end of lock up
    fast_forward(smc_handler, smc_handler.config['NOTARY_LOCKUP_LENGTH'] + 1)

    # Release
    smc_handler.release_notary(private_key=notary.private_key)
    mine(w3, 1)
    # Check that log was successfully emitted
    log = shard_tracker.get_release_notary_logs()[0]
    assert log.index_in_notary_pool == 0 and log.notary == notary.canonical_address

    # Test fetching logs in past period (each query must return at least one log)
    assert shard_tracker.get_register_notary_logs(from_period=0, to_period=0)
    assert shard_tracker.get_add_header_logs(from_period=1, to_period=1)
    assert shard_tracker.get_submit_vote_logs(from_period=1, to_period=1)
    assert shard_tracker.get_deregister_notary_logs(from_period=2, to_period=2)
    assert shard_tracker.get_release_notary_logs(
        from_period=(3 + smc_handler.config['NOTARY_LOCKUP_LENGTH']),
        to_period=(3 + smc_handler.config['NOTARY_LOCKUP_LENGTH'])
    )


================================================
FILE: tests/contract/test_notary_sample.py
================================================
from sharding.handler.utils.web3_utils import (
    mine,
)

from tests.contract.utils.common_utils import (
    update_notary_sample_size,
    batch_register,
    fast_forward,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)
from tests.contract.utils.sample_helper import (
    get_notary_pool_list,
    get_committee_list,
    get_sample_result,
)


def test_normal_update_notary_sample_size(smc_handler):  # noqa: F811
    """
    The notary sample size is refreshed at most once per period: an explicit
    update attempt in the same period is a no-op, while the first registry
    action in a later period performs the refresh.
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)

    # Register notary 0
    smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    _, notary_0_pool_index = smc_handler.get_notary_info(
        notary_0.checksum_address
    )
    assert notary_0_pool_index == 0
    next_period_notary_sample_size = smc_handler.next_period_notary_sample_size()
    assert (notary_0_pool_index + 1) == next_period_notary_sample_size

    notary_1 = NotaryAccount(1)

    # Register notary 1
    smc_handler.register_notary(private_key=notary_1.private_key)
    mine(w3, 1)

    _, notary_1_pool_index = smc_handler.get_notary_info(
        notary_1.checksum_address
    )
    assert notary_1_pool_index == 1
    next_period_notary_sample_size = smc_handler.next_period_notary_sample_size()
    assert (notary_1_pool_index + 1) == next_period_notary_sample_size

    # Check that it's not yet the time to update notary sample size,
    # i.e., current period is the same as latest period the notary sample size was updated.
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    notary_sample_size_updated_period = smc_handler.notary_sample_size_updated_period()
    assert current_period == notary_sample_size_updated_period

    # Check that current_period_notary_sample_size has not been updated before
    current_period_notary_sample_size = smc_handler.current_period_notary_sample_size()
    assert 0 == current_period_notary_sample_size

    # Try updating notary sample size; it must be a no-op in the same period.
    update_notary_sample_size(smc_handler)
    # BUGFIX: re-read the value from the contract before asserting.  The
    # original test asserted the stale local variable captured above, so the
    # assertion could never fail regardless of what the update did.
    current_period_notary_sample_size = smc_handler.current_period_notary_sample_size()
    assert 0 == current_period_notary_sample_size

    # fast forward to next period
    fast_forward(smc_handler, 1)

    # Register notary 2
    # NOTE: Registration would also invoke update_notary_sample_size function
    notary_2 = NotaryAccount(2)
    smc_handler.register_notary(private_key=notary_2.private_key)
    mine(w3, 1)

    # Check that current_period_notary_sample_size is updated,
    # i.e., it is assigned the value of next_period_notary_sample_size.
    current_period_notary_sample_size = smc_handler.current_period_notary_sample_size()
    assert next_period_notary_sample_size == current_period_notary_sample_size

    # Check that notary sample size is updated in this period
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    notary_sample_size_updated_period = smc_handler.notary_sample_size_updated_period()
    assert current_period == notary_sample_size_updated_period


def test_register_then_deregister(smc_handler):  # noqa: F811
    """Deregistering right after registering leaves next-period sample size untouched."""
    w3 = smc_handler.web3
    notary = NotaryAccount(0)

    # Register notary 0.
    smc_handler.register_notary(private_key=notary.private_key)
    mine(w3=w3, num_blocks=1)
    _, pool_index = smc_handler.get_notary_info(notary.checksum_address)
    assert pool_index == 0
    sample_size = smc_handler.next_period_notary_sample_size()
    assert sample_size == pool_index + 1

    # Deregister the same notary; the next-period sample size must not shrink.
    smc_handler.deregister_notary(private_key=notary.private_key)
    mine(w3=w3, num_blocks=1)
    sample_size = smc_handler.next_period_notary_sample_size()
    assert sample_size == pool_index + 1


def test_deregister_then_register(smc_handler):  # noqa: F811
    """A deregistration frees a pool spot that the next registration reuses."""
    w3 = smc_handler.web3
    notary_0 = NotaryAccount(0)

    # Register notary 0 and enter the next period.
    smc_handler.register_notary(private_key=notary_0.private_key)
    fast_forward(smc_handler, 1)

    # Deregister notary 0 first.
    # NOTE: deregistration also invokes the update_notary_sample_size function,
    # so current_period_notary_sample_size gets refreshed here.
    smc_handler.deregister_notary(private_key=notary_0.private_key)
    mine(w3=w3, num_blocks=1)
    assert smc_handler.current_period_notary_sample_size() == 1

    # A fresh registration (notary 1) reuses the vacated pool index 0.
    notary_1 = NotaryAccount(1)
    smc_handler.register_notary(private_key=notary_1.private_key)
    mine(w3=w3, num_blocks=1)
    _, pool_index = smc_handler.get_notary_info(notary_1.checksum_address)
    assert pool_index == 0
    # next_period_notary_sample_size stays at the previous high-water mark.
    assert smc_handler.next_period_notary_sample_size() == pool_index + 1


def test_series_of_deregister_starting_from_top_of_the_stack(smc_handler):  # noqa: F811
    """Deregistering notaries 2, 1, 0 in turn never shrinks the sample size."""
    w3 = smc_handler.web3

    # Register notary 0~2; the sample size for the next period becomes 3.
    batch_register(smc_handler, 0, 2)
    next_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_sample_size == 3

    # Enter the next period.
    fast_forward(smc_handler, 1)

    # Deregister notary 2 (top of the pool stack).
    smc_handler.deregister_notary(private_key=NotaryAccount(2).private_key)
    mine(w3=w3, num_blocks=1)
    # That deregistration also refreshed the sample size for this period.
    assert smc_handler.current_period_notary_sample_size() == 3
    # next_period_notary_sample_size remains the same.
    next_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_sample_size == 3

    # Deregister notary 1; still no shrink.
    smc_handler.deregister_notary(private_key=NotaryAccount(1).private_key)
    mine(w3=w3, num_blocks=1)
    next_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_sample_size == 3

    # Deregister notary 0; still no shrink.
    smc_handler.deregister_notary(private_key=NotaryAccount(0).private_key)
    mine(w3=w3, num_blocks=1)
    next_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_sample_size == 3

    # Enter the next period and refresh the sample size explicitly.
    fast_forward(smc_handler, 1)
    update_notary_sample_size(smc_handler)
    assert smc_handler.current_period_notary_sample_size() == next_sample_size


def test_series_of_deregister_starting_from_bottom_of_the_stack(smc_handler):  # noqa: F811
    """
    Deregister notaries in pool order (0, 1, 2) and check that
    next_period_notary_sample_size never shrinks below the registered peak.

    Cleanup: the original version read `get_notary_info` after each
    deregistration into pool-index locals that were never used; those dead
    assignments are removed.
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)
    notary_1 = NotaryAccount(1)
    notary_2 = NotaryAccount(2)

    # Register notary 0~2
    batch_register(smc_handler, 0, 2)

    # Fast forward to next period
    fast_forward(smc_handler, 1)

    # Deregister from notary 0 to notary 2
    # Deregister notary 0
    smc_handler.deregister_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    # Check that next_period_notary_sample_size remains the same
    next_period_notary_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_period_notary_sample_size == 3

    # Deregister notary 1
    smc_handler.deregister_notary(private_key=notary_1.private_key)
    mine(w3, 1)
    # Check that next_period_notary_sample_size remains the same
    next_period_notary_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_period_notary_sample_size == 3

    # Deregister notary 2
    smc_handler.deregister_notary(private_key=notary_2.private_key)
    mine(w3, 1)
    # Check that current_period_notary_sample_size is updated
    current_period_notary_sample_size = smc_handler.current_period_notary_sample_size()
    assert current_period_notary_sample_size == 3
    next_period_notary_sample_size = smc_handler.next_period_notary_sample_size()
    assert next_period_notary_sample_size == 3

    # Fast forward to next period
    fast_forward(smc_handler, 1)

    # Update notary sample size
    update_notary_sample_size(smc_handler)
    current_period_notary_sample_size = smc_handler.current_period_notary_sample_size()
    assert current_period_notary_sample_size == next_period_notary_sample_size


def test_get_member_of_committee_without_updating_sample_size(smc_handler):  # noqa: F811
    """Sampling still works when the sample size was last updated in a past period."""
    w3 = smc_handler.web3

    # Register notary 0~5, then enter the next period.
    batch_register(smc_handler, 0, 5)
    fast_forward(smc_handler, 1)

    # Register notary 6~8 (this also refreshes the sample size).
    batch_register(smc_handler, 6, 8)

    # Sample-size bookkeeping must be consistent.
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert smc_handler.notary_sample_size_updated_period() == period_now
    assert smc_handler.current_period_notary_sample_size() == 6
    assert smc_handler.next_period_notary_sample_size() == 9

    # Enter the next period WITHOUT refreshing the sample size.
    fast_forward(smc_handler, 1)
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert smc_handler.notary_sample_size_updated_period() == period_now - 1

    committee = get_committee_list(smc_handler, 0)
    # get_committee_list must have produced a non-empty committee ...
    assert len(committee) > 0
    # ... and the SMC must agree with it slot by slot.
    for slot, member in enumerate(committee):
        assert smc_handler.get_member_of_committee(0, slot) == member


def test_get_member_of_committee_with_updated_sample_size(smc_handler):  # noqa: F811
    """After an explicit sample-size update, SMC sampling matches get_committee_list."""
    w3 = smc_handler.web3

    # Register notary 0~8, then enter the next period.
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)

    # Explicitly refresh the notary sample size.
    update_notary_sample_size(smc_handler)
    # Sample-size bookkeeping must be consistent.
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert smc_handler.notary_sample_size_updated_period() == period_now
    assert smc_handler.current_period_notary_sample_size() == 9
    assert smc_handler.next_period_notary_sample_size() == 9

    # The SMC must agree with the off-chain committee list slot by slot.
    for slot, member in enumerate(get_committee_list(smc_handler, 0)):
        assert smc_handler.get_member_of_committee(0, slot) == member


def test_committee_lists_generated_are_different(smc_handler):  # noqa: F811
    """Sampled committees differ across shards and across periods."""
    # Register notary 0~8, enter the next period, and refresh the sample size.
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    update_notary_sample_size(smc_handler)

    # Different shards in the same period get different committees.
    committee_shard_0 = get_committee_list(smc_handler, 0)
    committee_shard_1 = get_committee_list(smc_handler, 1)
    assert committee_shard_0 != committee_shard_1

    # The same shard gets a different committee in the next period.
    fast_forward(smc_handler, 1)
    update_notary_sample_size(smc_handler)
    assert get_committee_list(smc_handler, 0) != committee_shard_0


def test_get_member_of_committee_with_non_member(smc_handler):  # noqa: F811
    """A committee slot never matches a notary other than the sampled one."""
    # Register notary 0~8, enter the next period, and refresh the sample size.
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    update_notary_sample_size(smc_handler)

    pool = get_notary_pool_list(smc_handler)
    for slot, member in enumerate(get_committee_list(smc_handler, 0)):
        # Pick the pool neighbour of the sampled notary (wrapping around).
        member_index = pool.index(member)
        neighbour = pool[(member_index + 1) % len(pool)]
        assert smc_handler.get_member_of_committee(0, slot) != neighbour


def test_committee_change_with_deregister_then_register(smc_handler):  # noqa: F811
    """A deregistered notary's committee slot empties, then is filled by a new registrant."""
    w3 = smc_handler.web3

    # Register notary 0~8, enter the next period, and refresh the sample size.
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    update_notary_sample_size(smc_handler)

    pool = get_notary_pool_list(smc_handler)
    # Deregister whichever notary fills slot 0 of shard 0's committee.
    sampled = get_committee_list(smc_handler, 0)[0]
    sampled_index = pool.index(sampled)
    smc_handler.deregister_notary(private_key=NotaryAccount(sampled_index).private_key)
    mine(w3=w3, num_blocks=1)
    # Slot 0 is now the empty (zero) address.
    assert smc_handler.get_member_of_committee(0, 0) == b'\x00' * 20

    # A fresh registration (notary 9) takes the vacated spot ...
    smc_handler.register_notary(private_key=NotaryAccount(9).private_key)
    mine(w3=w3, num_blocks=1)
    # ... and therefore shows up in committee slot 0.
    assert smc_handler.get_member_of_committee(0, 0) == NotaryAccount(9).canonical_address


def test_get_sample_result(smc_handler):  # noqa: F811
    """get_sample_result must agree with both get_committee_list and the SMC."""
    w3 = smc_handler.web3

    # Register notary 0~8, then enter the next period.
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)

    # Refresh the sample size for the current period.
    period_now = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    update_notary_sample_size(smc_handler)

    # Collect every shard's committee for this period.
    committees = [
        get_committee_list(smc_handler, shard_id)
        for shard_id in range(smc_handler.config['SHARD_COUNT'])
    ]

    # Every sampling hit reported for notary 0 must be consistent.
    notary_0 = NotaryAccount(0)
    _, pool_index = smc_handler.get_notary_info(notary_0.checksum_address)
    for (period, shard_id, sampling_index) in get_sample_result(smc_handler, pool_index):
        assert period == period_now
        # The off-chain committee list agrees ...
        assert committees[shard_id][sampling_index] == notary_0.canonical_address
        # ... and so does the on-chain sampling.
        assert smc_handler.get_member_of_committee(shard_id, sampling_index) \
            == notary_0.canonical_address


================================================
FILE: tests/contract/test_registry_management.py
================================================
from sharding.handler.utils.web3_utils import (
    mine,
)

from tests.contract.utils.common_utils import (
    batch_register,
    fast_forward,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)


def test_normal_register(smc_handler):  # noqa: F811
    """Happy-path registration: notaries are appended to the pool in order,
    and `get_notary_info` reports a zero deregistered-period plus the correct
    pool index for each active notary.
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)

    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert not does_notary_exist
    # Register notary 0
    smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    notary_deregistered_period, notary_pool_index = smc_handler.get_notary_info(
        notary_0.checksum_address
    )
    assert notary_deregistered_period == 0 and notary_pool_index == 0
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 1

    notary_1 = NotaryAccount(1)

    notary_2 = NotaryAccount(2)

    # Register notary 1 and notary 2.
    # NOTE: this previously called `batch_register(smc_handler, 0, 2)`, which
    # also re-submitted notary 0's registration; that duplicate registration
    # silently fails inside the SMC and only wastes gas. Register exactly
    # notaries 1 and 2 instead, matching the comment's intent.
    batch_register(smc_handler, 1, 2)

    does_notary_exist = smc_handler.does_notary_exist(notary_1.checksum_address)
    assert does_notary_exist
    notary_deregistered_period, notary_pool_index = smc_handler.get_notary_info(
        notary_1.checksum_address
    )
    assert notary_deregistered_period == 0 and notary_pool_index == 1

    does_notary_exist = smc_handler.does_notary_exist(notary_2.checksum_address)
    assert does_notary_exist
    notary_deregistered_period, notary_pool_index = smc_handler.get_notary_info(
        notary_2.checksum_address
    )
    assert notary_deregistered_period == 0 and notary_pool_index == 2

    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 3


def test_register_without_enough_ether(smc_handler):  # noqa: F811
    """A registration that carries less than NOTARY_DEPOSIT must be rejected."""
    w3 = smc_handler.web3
    notary = NotaryAccount(0)

    assert not smc_handler.does_notary_exist(notary.checksum_address)

    # Attempt to register while sending only a tiny fraction of the deposit
    insufficient_value = smc_handler.config['NOTARY_DEPOSIT'] // 10000
    smc_handler._send_transaction(
        func_name='register_notary',
        args=[],
        private_key=notary.private_key,
        value=insufficient_value,
        gas=smc_handler._estimate_gas_dict['register_notary'],
    )
    mine(w3, 1)

    # The registration must have failed: no registry entry, empty pool
    assert not smc_handler.does_notary_exist(notary.checksum_address)
    assert smc_handler.notary_pool_len() == 0


def test_double_register(smc_handler):  # noqa: F811
    """Registering the same notary twice must leave the pool unchanged."""
    w3 = smc_handler.web3
    notary = NotaryAccount(0)

    # First registration succeeds
    smc_handler.register_notary(private_key=notary.private_key)
    mine(w3, 1)
    assert smc_handler.does_notary_exist(notary.checksum_address)
    assert smc_handler.notary_pool_len() == 1

    # A second registration from the same account must fail:
    # the pool size is unchanged and the transaction emits no logs
    tx_hash = smc_handler.register_notary(private_key=notary.private_key)
    mine(w3, 1)
    assert smc_handler.notary_pool_len() == 1
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0


def test_normal_deregister(smc_handler):  # noqa: F811
    """Deregistration removes the notary from the pool but keeps its registry
    entry, recording the period in which it deregistered (for the lock-up).
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)

    # Register notary 0
    smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 1

    # Fast forward into the next period
    fast_forward(smc_handler, 1)

    # Deregister notary 0
    smc_handler.deregister_notary(private_key=notary_0.private_key)
    # Period the deregistration is expected to be recorded in
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    mine(w3, 1)
    # Still present in the registry, but no longer counted in the pool
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    notary_deregistered_period, notary_pool_index = smc_handler.get_notary_info(
        notary_0.checksum_address
    )
    assert notary_deregistered_period == current_period
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 0


def test_deregister_then_register(smc_handler):  # noqa: F811
    """A notary that has deregistered cannot re-register before its deposit
    has been released.
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)

    # Register notary 0
    smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 1

    # Fast forward into the next period
    fast_forward(smc_handler, 1)

    # Deregister notary 0
    smc_handler.deregister_notary(private_key=notary_0.private_key)
    # Period the deregistration is expected to be recorded in
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    mine(w3, 1)
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    notary_deregistered_period, notary_pool_index = smc_handler.get_notary_info(
        notary_0.checksum_address
    )
    assert notary_deregistered_period == current_period
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 0

    # Register again right away; the lock-up has not elapsed, so this fails
    tx_hash = smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    # Check pool remains the same, the transaction consumes all gas
    # and no logs have been emitted
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 0
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0


def test_normal_release_notary(smc_handler):  # noqa: F811
    """Releasing the deposit succeeds once the lock-up period has elapsed,
    removing the notary's registry entry entirely.
    """
    w3 = smc_handler.web3
    notary = NotaryAccount(0)

    # Register notary 0
    smc_handler.register_notary(private_key=notary.private_key)
    mine(w3, 1)
    assert smc_handler.does_notary_exist(notary.checksum_address)
    assert smc_handler.notary_pool_len() == 1

    # Move into the next period before deregistering
    fast_forward(smc_handler, 1)

    # Deregister notary 0
    smc_handler.deregister_notary(private_key=notary.private_key)
    mine(w3, 1)
    assert smc_handler.notary_pool_len() == 0

    # Wait until the lock-up has fully ended
    fast_forward(smc_handler, smc_handler.config['NOTARY_LOCKUP_LENGTH'] + 1)

    # Release notary 0; the registry entry must now be gone
    smc_handler.release_notary(private_key=notary.private_key)
    mine(w3, 1)
    assert not smc_handler.does_notary_exist(notary.checksum_address)


def test_instant_release_notary(smc_handler):  # noqa: F811
    """Attempting to release the deposit before the lock-up period has
    elapsed must fail and leave the registry entry in place.
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)

    # Register notary 0
    smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 1

    # Fast forward into the next period
    fast_forward(smc_handler, 1)

    # Deregister notary 0
    smc_handler.deregister_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 0

    # Try to release notary 0 immediately, without waiting out the lock-up
    tx_hash = smc_handler.release_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    # Check registry remains the same, the transaction consumes all gas
    # and no logs have been emitted
    does_notary_exist = smc_handler.does_notary_exist(notary_0.checksum_address)
    assert does_notary_exist
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0


def test_deregister_and_new_notary_register(smc_handler):  # noqa: F811
    """A deregistered notary's pool slot is pushed onto the empty-slots stack
    and handed to the next notary that registers.
    """
    w3 = smc_handler.web3

    notary_0 = NotaryAccount(0)

    # Register notary 0
    smc_handler.register_notary(private_key=notary_0.private_key)
    mine(w3, 1)
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 1

    notary_2 = NotaryAccount(2)

    # Register notary 1~3
    batch_register(smc_handler, 1, 3)

    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 4
    # Check that empty_slots_stack is empty
    empty_slots_stack_top = smc_handler.empty_slots_stack_top()
    assert empty_slots_stack_top == 0

    # Fast forward into the next period
    fast_forward(smc_handler, 1)

    # Deregister notary 2
    smc_handler.deregister_notary(private_key=notary_2.private_key)
    mine(w3, 1)
    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 3

    # Check that empty_slots_stack is not empty
    empty_slots_stack_top = smc_handler.empty_slots_stack_top()
    assert empty_slots_stack_top == 1
    _, notary_2_pool_index = smc_handler.get_notary_info(notary_2.checksum_address)
    empty_slots = smc_handler.empty_slots_stack(0)
    # Check that the top empty_slots entry points to notary 2's vacated slot
    assert empty_slots == notary_2_pool_index

    notary_4 = NotaryAccount(4)

    # Register notary 4
    smc_handler.register_notary(private_key=notary_4.private_key)
    mine(w3, 1)

    notary_pool_length = smc_handler.notary_pool_len()
    assert notary_pool_length == 4
    # Check that empty_slots_stack is empty again (slot was consumed)
    empty_slots_stack_top = smc_handler.empty_slots_stack_top()
    assert empty_slots_stack_top == 0
    _, notary_4_pool_index = smc_handler.get_notary_info(notary_4.checksum_address)
    # Check that notary 4 filled in notary 2's vacated slot
    assert notary_4_pool_index == notary_2_pool_index


================================================
FILE: tests/contract/test_submit_vote.py
================================================
import pytest

from sharding.handler.utils.web3_utils import (
    mine,
)

from tests.contract.utils.common_utils import (
    batch_register,
    fast_forward,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)
from tests.contract.utils.sample_helper import (
    sampling,
    get_sample_result,
)


def test_normal_submit_vote(smc_handler):  # noqa: F811
    """Happy-path voting: a sampled notary can vote on a proposed header, and
    once QUORUM_SIZE votes are cast the collation becomes elected.
    """
    w3 = smc_handler.web3
    # We only vote in shard 0 for ease of testing
    shard_id = 0

    # Register notary 0~8 and fast forward to next period
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Add collation record
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    # Get the first notary in the sample list in this period
    sample_index = 0
    pool_index = sampling(smc_handler, shard_id)[sample_index]
    # Check that voting record does not exist prior to voting
    assert smc_handler.get_vote_count(shard_id) == 0
    assert not smc_handler.has_notary_voted(shard_id, sample_index)
    # First notary vote
    smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(index=pool_index).private_key,
    )
    mine(w3, 1)
    # Check that vote has been casted successfully
    assert smc_handler.get_vote_count(shard_id) == 1
    assert smc_handler.has_notary_voted(shard_id, sample_index)

    # Check that collation is not elected (a single vote is below quorum)
    # and forward to next period
    assert not smc_handler.get_collation_is_elected(shard_id=shard_id, period=current_period)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 2

    # Add collation record
    CHUNK_ROOT_2_0 = b'\x20' * 32
    smc_handler.add_header(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_2_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    # Check that vote count is zero (resets each period)
    assert smc_handler.get_vote_count(shard_id) == 0
    # Keep voting until the collation is elected.
    for (sample_index, pool_index) in enumerate(sampling(smc_handler, shard_id)):
        if smc_handler.get_collation_is_elected(shard_id=shard_id, period=current_period):
            assert smc_handler.get_vote_count(shard_id) == smc_handler.config['QUORUM_SIZE']
            break
        # Check that voting record does not exist prior to voting
        assert not smc_handler.has_notary_voted(shard_id, sample_index)
        # Vote
        smc_handler.submit_vote(
            shard_id=shard_id,
            period=current_period,
            chunk_root=CHUNK_ROOT_2_0,
            index=sample_index,
            private_key=NotaryAccount(index=pool_index).private_key,
        )
        mine(w3, 1)
        # Check that vote has been casted successfully
        assert smc_handler.has_notary_voted(shard_id, sample_index)
    # Check that the collation is indeed elected.
    assert smc_handler.get_collation_is_elected(shard_id=shard_id, period=current_period)


def test_double_submit_vote(smc_handler):  # noqa: F811
    """Submitting the same vote twice must fail: the vote count stays at one
    and the duplicate transaction emits no logs.
    """
    w3 = smc_handler.web3
    # We only vote in shard 0 for ease of testing
    shard_id = 0

    # Register notary 0~8 and fast forward to next period
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Add collation record
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    # Get the first notary in the sample list in this period and vote
    sample_index = 0
    pool_index = sampling(smc_handler, shard_id)[sample_index]
    smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(index=pool_index).private_key,
    )
    mine(w3, 1)
    # Check that vote has been casted successfully
    assert smc_handler.get_vote_count(shard_id) == 1
    assert smc_handler.has_notary_voted(shard_id, sample_index)

    # Attempt to double vote with identical arguments
    tx_hash = smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(index=pool_index).private_key,
    )
    mine(w3, 1)
    # Check that transaction failed and vote count remains the same
    # and no logs has been emitted
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0
    assert smc_handler.get_vote_count(shard_id) == 1


def test_submit_vote_by_notary_sampled_multiple_times(smc_handler):  # noqa: F811
    """A notary sampled into several committee seats may cast one vote per
    seat, and each vote is counted.
    """
    w3 = smc_handler.web3
    # We only vote in shard 0 for ease of testing
    shard_id = 0

    # Here we only register 5 notaries so it's guaranteed that at least
    # one notary is going to be sampled twice.
    # NOTE(review): this relies on COMMITTEE_SIZE being larger than 5 so the
    # pigeonhole principle applies — confirm against the testing config.
    # Register notary 0~4 and fast forward to next period
    batch_register(smc_handler, 0, 4)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Add collation record
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    # Find the notary that's sampled more than one time
    for pool_index in range(5):
        sample_index_list = [
            sample_index
            for (_, _shard_id, sample_index) in get_sample_result(smc_handler, pool_index)
            if _shard_id == shard_id
        ]
        if len(sample_index_list) > 1:
            vote_count = len(sample_index_list)
            # Vote once for each seat this notary occupies
            for sample_index in sample_index_list:
                smc_handler.submit_vote(
                    shard_id=shard_id,
                    period=current_period,
                    chunk_root=CHUNK_ROOT_1_0,
                    index=sample_index,
                    private_key=NotaryAccount(index=pool_index).private_key,
                )
                mine(w3, 1)
            # Check that every vote is successfully casted even by the same notary
            assert smc_handler.get_vote_count(shard_id) == vote_count
            break


def test_submit_vote_by_non_eligible_notary(smc_handler):  # noqa: F811
    """A vote signed by a notary other than the one sampled for the seat
    must be rejected without recording anything.
    """
    w3 = smc_handler.web3
    # We only vote in shard 0 for ease of testing
    shard_id = 0

    # Register notary 0~8 and fast forward to next period
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Add collation record
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    sample_index = 0
    eligible_pool_index = sampling(smc_handler, shard_id)[sample_index]
    # Pick any pool index that is NOT the sampled one
    ineligible_pool_index = 1 if eligible_pool_index == 0 else 0
    tx_hash = smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        # Vote signed by the wrong (non-eligible) notary
        private_key=NotaryAccount(ineligible_pool_index).private_key,
    )
    mine(w3, 1)
    # The transaction failed: no logs emitted and no vote recorded
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0
    assert smc_handler.get_vote_count(shard_id) == 0
    assert not smc_handler.has_notary_voted(shard_id, sample_index)


def test_submit_vote_without_add_header_first(smc_handler):  # noqa: F811
    """Voting for a period in which no header was proposed must fail."""
    w3 = smc_handler.web3
    # We only vote in shard 0 for ease of testing
    shard_id = 0

    # Register notary 0~8 and fast forward to next period
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Note: no add_header call is made for this chunk root
    CHUNK_ROOT_1_0 = b'\x10' * 32
    # Get the first notary in the sample list in this period and vote
    sample_index = 0
    pool_index = sampling(smc_handler, shard_id)[sample_index]
    tx_hash = smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(index=pool_index).private_key,
    )
    mine(w3, 1)
    # Check that transaction failed and vote count remains the same
    # and no logs has been emitted
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0
    assert smc_handler.get_vote_count(shard_id) == 0
    assert not smc_handler.has_notary_voted(shard_id, sample_index)


@pytest.mark.parametrize(  # noqa: F811
    'period, shard_id, chunk_root, sample_index',
    (
        (-1, 0, b'\x10' * 32, 0),
        (999, 0, b'\x10' * 32, 0),
        (1, -1, b'\x10' * 32, 0),
        (1, 999, b'\x10' * 32, 0),
        (1, 0, b'\xff' * 32, 0),
        (1, 0, b'\x10' * 32, -1),
        (1, 0, b'\x10' * 32, 999),
    )
)
def test_submit_vote_with_invalid_args(smc_handler, period, shard_id, chunk_root, sample_index):
    """A vote with any invalid argument (out-of-range period/shard/index, or
    a chunk root that does not match the proposed header) must fail.
    """
    w3 = smc_handler.web3

    # Register notary 0~8 and fast forward to next period
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Add correct collation record (shard 0, chunk root b'\x10' * 32)
    smc_handler.add_header(
        shard_id=0,
        period=current_period,
        chunk_root=b'\x10' * 32,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    pool_index = sampling(smc_handler, 0)[0]
    # Vote with provided incorrect arguments
    tx_hash = smc_handler.submit_vote(
        shard_id=shard_id,
        period=period,
        chunk_root=chunk_root,
        index=sample_index,
        private_key=NotaryAccount(index=pool_index).private_key,
    )
    mine(w3, 1)
    # Check that transaction failed and vote count remains the same
    # and no logs has been emitted
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0
    assert smc_handler.get_vote_count(shard_id) == 0
    assert not smc_handler.has_notary_voted(shard_id, sample_index)


def test_submit_vote_then_deregister(smc_handler):  # noqa: F811
    """A cast vote survives the voter's deregistration, and a new notary that
    takes over the vacated pool slot cannot vote in the old notary's seat.
    """
    w3 = smc_handler.web3
    # We only vote in shard 0 for ease of testing
    shard_id = 0

    # Register notary 0~8 and fast forward to next period
    batch_register(smc_handler, 0, 8)
    fast_forward(smc_handler, 1)
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']
    assert current_period == 1

    # Add collation record
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(index=0).private_key,
    )
    mine(w3, 1)

    sample_index = 0
    pool_index = sampling(smc_handler, shard_id)[sample_index]
    smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(index=pool_index).private_key,
    )
    mine(w3, 1)

    # Check that vote has been casted successfully
    assert smc_handler.get_vote_count(shard_id) == 1
    assert smc_handler.has_notary_voted(shard_id, sample_index)

    # The notary deregisters
    smc_handler.deregister_notary(private_key=NotaryAccount(pool_index).private_key)
    mine(w3, 1)
    # Check that vote was not effected by deregistration
    assert smc_handler.get_vote_count(shard_id) == 1
    assert smc_handler.has_notary_voted(shard_id, sample_index)

    # Notary 9 registers and takes retired notary's place in pool
    smc_handler.register_notary(private_key=NotaryAccount(9).private_key)
    # Attempt to vote in the seat sampled for the retired notary
    tx_hash = smc_handler.submit_vote(
        shard_id=shard_id,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(index=9).private_key,
    )
    mine(w3, 1)

    # Check that transaction failed and vote count remains the same
    # and no logs has been emitted
    assert len(w3.eth.getTransactionReceipt(tx_hash)['logs']) == 0
    assert smc_handler.get_vote_count(shard_id) == 1


================================================
FILE: tests/contract/utils/common_utils.py
================================================
from sharding.handler.utils.web3_utils import (
    mine,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)


def update_notary_sample_size(smc_handler):
    """Send an `update_notary_sample_size` transaction (signed by notary 0)
    and mine one block so it takes effect.
    """
    sender_key = NotaryAccount(0).private_key
    gas_limit = smc_handler._estimate_gas_dict['update_notary_sample_size']
    smc_handler._send_transaction(
        func_name='update_notary_sample_size',
        args=[],
        private_key=sender_key,
        gas=gas_limit,
    )
    mine(smc_handler.web3, 1)


def batch_register(smc_handler, start, end):
    """Register notaries `start` through `end` (inclusive), then mine a
    single block so all registrations are included at once.
    """
    assert start <= end
    for index in range(start, end + 1):
        smc_handler.register_notary(private_key=NotaryAccount(index).private_key)
    mine(smc_handler.web3, 1)


def fast_forward(smc_handler, num_of_periods):
    """Mine exactly enough blocks to land on the first block of the period
    `num_of_periods` periods ahead of the current one.
    """
    assert num_of_periods > 0
    period_length = smc_handler.config['PERIOD_LENGTH']
    current_block = smc_handler.web3.eth.blockNumber
    target_block = (current_block // period_length + num_of_periods) * period_length
    mine(smc_handler.web3, target_block - current_block)


================================================
FILE: tests/contract/utils/notary_account.py
================================================
from eth_tester.backends.pyevm.main import (
    get_default_account_keys,
)


class NotaryAccount:
    """Convenience wrapper around one of eth-tester's default accounts,
    addressed by its index in the default key list.
    """
    # Index into eth-tester's default account key list
    index = None

    def __init__(self, index):
        self.index = index

    @property
    def private_key(self):
        # Look the key up on each access rather than caching it
        keys = get_default_account_keys()
        return keys[self.index]

    @property
    def checksum_address(self):
        public_key = self.private_key.public_key
        return public_key.to_checksum_address()

    @property
    def canonical_address(self):
        public_key = self.private_key.public_key
        return public_key.to_canonical_address()


================================================
FILE: tests/contract/utils/sample_helper.py
================================================
from eth_utils import (
    to_list,
    keccak,
    big_endian_to_int,
)

from evm.utils.numeric import (
    int_to_bytes32,
)


@to_list
def get_notary_pool_list(smc_handler):
    """Return every notary currently registered in the notary pool, in
    pool-index order.
    """
    pool_size = smc_handler.notary_pool_len()
    for pool_index in range(pool_size):
        yield smc_handler.notary_pool(pool_index)


@to_list
def sampling(smc_handler, shard_id):
    """Reproduce, off-chain, the sampling that the SMC performs inside its
    `get_member_of_committee` function, returning the list of sampled notary
    pool indices for `shard_id` in the current period. This avoids the
    overhead of making contract calls to the SMC, which could be quite
    significant for a complete sampling result (a total of
    `SHARD_COUNT` * `COMMITTEE_SIZE` contract calls).
    """
    w3 = smc_handler.web3
    current_period = w3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']

    # Determine sample size, depending on whether the sample size has been
    # updated for the current period yet
    if smc_handler.notary_sample_size_updated_period() < current_period:
        sample_size = smc_handler.next_period_notary_sample_size()
    elif smc_handler.notary_sample_size_updated_period() == current_period:
        sample_size = smc_handler.current_period_notary_sample_size()
    else:
        raise Exception("notary_sample_size_updated_period is larger than current period")

    # Get source for pseudo random number generation: the hash of the last
    # block of the previous period
    bytes32_shard_id = int_to_bytes32(shard_id)
    entropy_block_number = current_period * smc_handler.config['PERIOD_LENGTH'] - 1
    entropy_block_hash = w3.eth.getBlock(entropy_block_number)['hash']

    # One pseudo-random draw (mod sample_size) per committee seat
    for i in range(smc_handler.config['COMMITTEE_SIZE']):
        yield big_endian_to_int(
            keccak(
                entropy_block_hash + bytes32_shard_id + int_to_bytes32(i)
            )
        ) % sample_size


@to_list
def get_committee_list(smc_handler, shard_id):
    """Resolve the committee sampled for `shard_id` in the current period
    into the sampled notaries' addresses.
    """
    sampled_pool_indices = sampling(smc_handler, shard_id)
    for pool_index in sampled_pool_indices:
        yield smc_handler.notary_pool(pool_index)


@to_list
def get_sample_result(smc_handler, notary_index):
    """Return every (period, shard_id, sampling_index) slot in which the
    notary at pool index `notary_index` is sampled during the current period.
    Note that a sampling index is not the same thing as a notary pool index.
    """
    period = smc_handler.web3.eth.blockNumber // smc_handler.config['PERIOD_LENGTH']

    for shard_id in range(smc_handler.config['SHARD_COUNT']):
        shard_sample = sampling(smc_handler, shard_id)
        for sampling_index, pool_index in enumerate(shard_sample):
            if pool_index == notary_index:
                yield (period, shard_id, sampling_index)


================================================
FILE: tests/handler/__init__.py
================================================


================================================
FILE: tests/handler/test_log_handler.py
================================================
import itertools

import pytest

from cytoolz.dicttoolz import (
    assoc,
)

from web3 import (
    Web3,
)

from web3.providers.eth_tester import (
    EthereumTesterProvider,
)

from eth_utils import (
    event_signature_to_log_topic,
)

from eth_tester import (
    EthereumTester,
    PyEVMBackend,
)
from eth_tester.backends.pyevm.main import (
    get_default_account_keys,
)

from sharding.handler.log_handler import (
    LogHandler,
)
from sharding.handler.utils.web3_utils import (
    mine,
    take_snapshot,
    revert_to_snapshot,
)


# Minimal Vyper contract used as a log-emitting fixture: calling
# `emit_log(n)` fires the `Test(int128)` event carrying `n`.
code = """
Test: __log__({amount1: num})

@public
def emit_log(log_number: num):
    log.Test(log_number)
"""
# ABI and pre-compiled bytecode for the contract above (compiled offline so
# the test run does not need a Vyper compiler available).
abi = [{'name': 'Test', 'inputs': [{'type': 'int128', 'name': 'amount1', 'indexed': False}], 'anonymous': False, 'type': 'event'}, {'name': 'emit_log', 'outputs': [], 'inputs': [{'type': 'int128', 'name': 'log_number'}], 'constant': False, 'payable': False, 'type': 'function'}]  # noqa: E501
bytecode = b'a\x00\xf9V`\x005`\x1cRt\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00` Ro\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff`@R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00``Rt\x01*\x05\xf1\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfd\xab\xf4\x1c\x00`\x80R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xd5\xfa\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\xa0Rc\xd0(}7`\x00Q\x14\x15a\x00\xf4W` `\x04a\x01@74\x15\x15XW``Q`\x045\x80`@Q\x90\x13XW\x80\x91\x90\x12XWPa\x01@Qa\x01`R\x7f\xaeh\x04lU;\x85\xd0\x8bolL6\x92S)\x06\xf3M\x1d\xa6\xcb\x032\x1e\xd6\x96\xca\x0b\xdcL\xad` a\x01`\xa1\x00[[a\x00\x04a\x00\xf9\x03a\x00\x04`\x009a\x00\x04a\x00\xf9\x03`\x00\xf3'  # noqa: E501

# First default eth-tester account signs every transaction in these tests
test_keys = get_default_account_keys()
privkey = test_keys[0]
default_tx_detail = {
    'from': privkey.public_key.to_checksum_address(),
    'gas': 500000,
}
# keccak topic derived from the `Test(int128)` event signature
test_event_signature = event_signature_to_log_topic("Test(int128)")

# NOTE(review): HISTORY_SIZE is not referenced in the visible tests of this
# module — confirm whether it is still used before relying on it.
HISTORY_SIZE = 256


@pytest.fixture
def contract():
    """Deploy the log-emitting test contract on a fresh eth-tester chain
    (manual mining) and return a web3 contract handle to it.
    """
    tester = EthereumTester(
        backend=PyEVMBackend(),
        auto_mine_transactions=False,
    )
    w3 = Web3(EthereumTesterProvider(tester))
    deploy_tx_hash = w3.eth.sendTransaction(assoc(default_tx_detail, 'data', bytecode))
    mine(w3, 1)
    deploy_receipt = w3.eth.getTransactionReceipt(deploy_tx_hash)
    deployed_address = deploy_receipt['contractAddress']
    return w3.eth.contract(deployed_address, abi=abi, bytecode=bytecode)


def test_get_logs_without_forks(contract, smc_testing_config):
    """LogHandler.get_logs returns exactly the logs emitted since its
    previous call when the chain grows linearly (no reorgs).
    """
    period_length = smc_testing_config['PERIOD_LENGTH']
    w3 = contract.web3
    log_handler = LogHandler(w3, period_length)
    counter = itertools.count()

    # Emit log 0 and fetch it
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    logs_block2 = log_handler.get_logs(address=contract.address)
    assert len(logs_block2) == 1
    assert int(logs_block2[0]['data'], 16) == 0
    mine(w3, period_length - 1)

    # Emit log 1; only the new log is returned
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    logs_block3 = log_handler.get_logs(address=contract.address)
    assert len(logs_block3) == 1
    assert int(logs_block3[0]['data'], 16) == 1
    mine(w3, period_length - 1)

    # Emit logs 2 and 3 in two separate blocks; both are returned in order
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    logs_block4_5 = log_handler.get_logs(address=contract.address)
    assert len(logs_block4_5) == 2
    assert int(logs_block4_5[0]['data'], 16) == 2
    assert int(logs_block4_5[1]['data'], 16) == 3


def test_get_logs_with_forks(contract, smc_testing_config):
    """After reverting to a snapshot, get_logs() must return only logs from
    the surviving chain (values 1 and 2), not the discarded branch (value 0).
    """
    w3 = contract.web3
    log_handler = LogHandler(w3, smc_testing_config['PERIOD_LENGTH'])
    counter = itertools.count()
    snapshot_id = take_snapshot(w3)
    current_block_number = w3.eth.blockNumber

    # Emit value 0 on a branch that the snapshot revert below discards.
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    revert_to_snapshot(w3, snapshot_id)
    assert w3.eth.blockNumber == current_block_number
    # Emit values 1 and 2 on the post-revert (canonical) chain.
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    contract.functions.emit_log(next(counter)).transact(default_tx_detail)
    mine(w3, 1)
    logs = log_handler.get_logs()
    # NOTE(review): the length assertion below is commented out — presumably
    # flaky or a known gap in fork handling; confirm whether to re-enable it.
    # assert len(logs) == 2
    assert int(logs[0]['data'], 16) == 1
    assert int(logs[1]['data'], 16) == 2


================================================
FILE: tests/handler/test_shard_tracker.py
================================================
import logging

import pytest

from sharding.handler.exceptions import (
    LogParsingError,
)
from sharding.handler.utils.log_parser import (
    LogParser,
)
from sharding.handler.shard_tracker import (  # noqa: F401
    ShardTracker,
)
from sharding.handler.utils.web3_utils import (
    mine,
)

from tests.contract.utils.common_utils import (
    batch_register,
    fast_forward,
)
from tests.contract.utils.notary_account import (
    NotaryAccount,
)
from tests.contract.utils.sample_helper import (
    sampling,
)


logger = logging.getLogger('sharding.handler.ShardTracker')


# Each case is (raw web3 log dict, SMC event name, expected parsed attributes).
# The raw dicts are captured real log payloads; do not reformat them.
@pytest.mark.parametrize(
    'raw_log, event_name, attr_tuples',
    (
        # RegisterNotary: pool index 0
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000000', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf']},  # noqa: E501
            'RegisterNotary',
            [
                ('index_in_notary_pool', 0),
                ('notary', b'~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf'),
            ]
        ),
        # RegisterNotary: pool index 3, different notary address
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\x16\xc2\x0b\xadZ|\x92l@@\xb1\x15\x93nh\xd6]p\x16\xae\xd5\xe7\x9crKl\x8c\xcf\x06\x9a\xd4\x05', 'blockHash': b'\x94\\\xce\x19\x01:j\xbb\xf8\xba\x19\xcfv\xc3z3}^\xb6>\xa0\x0e\xf74\xe8A\t\x12p\x9a\xf6V', 'blockNumber': 30, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000003', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\xffG\xbc:\x10\xa4]K#\x0b]\x10\xe3wQ\xfej\xa7\x18']},  # noqa: E501
            'RegisterNotary',
            [
                ('index_in_notary_pool', 3),
                ('notary', b'\x1e\xffG\xbc:\x10\xa4]K#\x0b]\x10\xe3wQ\xfej\xa7\x18'),
            ]
        ),
        # DeregisterNotary: pool index 5, deregistered in period 10
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000000a', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf']},  # noqa: E501
            'DeregisterNotary',
            [
                ('index_in_notary_pool', 5),
                ('notary', b'~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf'),
                ('deregistered_period', 10),
            ]
        ),
        # DeregisterNotary: pool index 16, deregistered in period 5
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\x16\xc2\x0b\xadZ|\x92l@@\xb1\x15\x93nh\xd6]p\x16\xae\xd5\xe7\x9crKl\x8c\xcf\x06\x9a\xd4\x05', 'blockHash': b'\x94\\\xce\x19\x01:j\xbb\xf8\xba\x19\xcfv\xc3z3}^\xb6>\xa0\x0e\xf74\xe8A\t\x12p\x9a\xf6V', 'blockNumber': 30, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x00000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000005', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\xffG\xbc:\x10\xa4]K#\x0b]\x10\xe3wQ\xfej\xa7\x18']},  # noqa: E501
            'DeregisterNotary',
            [
                ('index_in_notary_pool', 16),
                ('notary', b'\x1e\xffG\xbc:\x10\xa4]K#\x0b]\x10\xe3wQ\xfej\xa7\x18'),
                ('deregistered_period', 5),
            ]
        ),
        # AddHeader: period 1, shard 0
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x00000000000000000000000000000000000000000000000000000000000000011010101010101010101010101010101010101010101010101010101010101010', 'topics': [b'$\xa5\x146ipE\xb9:y\xa2\xbd\xa9\x00\xb0PU\xf1\xe1\xe9\x1b\x02\x1bL/\xb6\xf6|\xbb\x0b.\x95', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00']},  # noqa: E501
            'AddHeader',
            [
                ('period', 1),
                ('shard_id', 0),
                ('chunk_root', b'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10'),  # noqa: E501
            ]
        ),
        # AddHeader: period 7, shard 3
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\x16\xc2\x0b\xadZ|\x92l@@\xb1\x15\x93nh\xd6]p\x16\xae\xd5\xe7\x9crKl\x8c\xcf\x06\x9a\xd4\x05', 'blockHash': b'\x94\\\xce\x19\x01:j\xbb\xf8\xba\x19\xcfv\xc3z3}^\xb6>\xa0\x0e\xf74\xe8A\t\x12p\x9a\xf6V', 'blockNumber': 30, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x00000000000000000000000000000000000000000000000000000000000000077373737373737373737373737373737373737373737373737373737373737373', 'topics': [b'$\xa5\x146ipE\xb9:y\xa2\xbd\xa9\x00\xb0PU\xf1\xe1\xe9\x1b\x02\x1bL/\xb6\xf6|\xbb\x0b.\x95', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03']},  # noqa: E501
            'AddHeader',
            [
                ('period', 7),
                ('shard_id', 3),
                ('chunk_root', b'ssssssssssssssssssssssssssssssss'),
            ]
        ),
        # SubmitVote: period 16, shard 1, includes the voting notary address
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x000000000000000000000000000000000000000000000000000000000000001010011001100110011001100110011001100110011001100110011001100110010000000000000000000000001eff47bc3a10a45d4b230b5d10e37751fe6aa718', 'topics': [b'$\xa5\x146ipE\xb9:y\xa2\xbd\xa9\x00\xb0PU\xf1\xe1\xe9\x1b\x02\x1bL/\xb6\xf6|\xbb\x0b.\x95', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01']},  # noqa: E501
            'SubmitVote',
            [
                ('period', 16),
                ('shard_id', 1),
                ('chunk_root', b'\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01\x10\x01'),  # noqa: E501
                ('notary', b'\x1e\xffG\xbc:\x10\xa4]K#\x0b]\x10\xe3wQ\xfej\xa7\x18'),
            ]
        ),
        # SubmitVote: period 33, shard 99
        (
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\x16\xc2\x0b\xadZ|\x92l@@\xb1\x15\x93nh\xd6]p\x16\xae\xd5\xe7\x9crKl\x8c\xcf\x06\x9a\xd4\x05', 'blockHash': b'\x94\\\xce\x19\x01:j\xbb\xf8\xba\x19\xcfv\xc3z3}^\xb6>\xa0\x0e\xf74\xe8A\t\x12p\x9a\xf6V', 'blockNumber': 30, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x000000000000000000000000000000000000000000000000000000000000002121632163216321632163216321632163216321632163216321632163216321630000000000000000000000007e5f4552091a69125d5dfcb7b8c2659029395bdf', 'topics': [b'$\xa5\x146ipE\xb9:y\xa2\xbd\xa9\x00\xb0PU\xf1\xe1\xe9\x1b\x02\x1bL/\xb6\xf6|\xbb\x0b.\x95', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63']},  # noqa: E501
            'SubmitVote',
            [
                ('period', 33),
                ('shard_id', 99),
                ('chunk_root', b'!c!c!c!c!c!c!c!c!c!c!c!c!c!c!c!c'),
                ('notary', b'~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf'),
            ]
        ),
    )
)
def test_normal_log_parser(raw_log, event_name, attr_tuples):
    """Parsing a well-formed raw log must expose every expected attribute
    with its decoded value.
    """
    parsed_log = LogParser(event_name=event_name, log=raw_log)
    for attr in attr_tuples:
        assert getattr(parsed_log, attr[0]) == attr[1]


# Malformed inputs: each case pairs a raw log with an event name such that
# the topic/data layout does not match that event's ABI.
@pytest.mark.parametrize(
    'raw_log, event_name',
    (
        (
            # Wrong event name
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000000a', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf']},  # noqa: E501
            'WrongEventName',
        ),
        (
            # Too many topics in log
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000000', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'$\xa5\x146ipE\xb9:y\xa2\xbd\xa9\x00\xb0PU\xf1\xe1\xe9\x1b\x02\x1bL/\xb6\xf6|\xbb\x0b.\x95', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf']},  # noqa: E501
            'RegisterNotary',
        ),
        (
            # Too few topics in log
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000000', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec']},  # noqa: E501
            'RegisterNotary',
        ),
        (
            # Too many data in log
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ff', 'topics': [b'B\xccp\x0f[x\xa7Le \xecSA\xd7\xc4\x9e\xea\xa8\xf8\x90\x15\xe7\x14\xb4\xd7 |\x94|-\x19\xec', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf']},  # noqa: E501
            'DeregisterNotary',
        ),
        (
            # Too few data in log
            {'type': 'mined', 'logIndex': 0, 'transactionIndex': 0, 'transactionHash': b'\xda\xb8:\xe5\x86\xe9Q\xf2\x9c\xc6<g\x9bl\x84\x85\xf4\x1dh\xce\x8d\xe6\xc0D\xa0*E\xd8m\xd4\x01\xcf', 'blockHash': b'\x13\xa97d\r\x90t\xe5;\x84\xf9\xe0\xb8\xf2c\x1c}\x88\xbf\x84DN\xa0\x16Q\xd9|\xa1\x00\x91\xc0\xbd', 'blockNumber': 25, 'address': '0xf4F1600B0a65995833854738764b50A4DA8d6BE1', 'data': '0x0000000000000000000000000000000000000000000000000000000000000001', 'topics': [b'$\xa5\x146ipE\xb9:y\xa2\xbd\xa9\x00\xb0PU\xf1\xe1\xe9\x1b\x02\x1bL/\xb6\xf6|\xbb\x0b.\x95', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00']},  # noqa: E501
            'AddHeader',
        ),
    )
)
def test_log_parser_with_wrong_log_content(raw_log, event_name):
    """Mismatched event name or malformed topic/data layout must raise
    LogParsingError rather than produce a partially-parsed log.
    """
    with pytest.raises(LogParsingError):
        LogParser(event_name=event_name, log=raw_log)


def test_status_checking_functions(smc_handler, smc_testing_config):  # noqa: F811
    """End-to-end exercise of ShardTracker's boolean status helpers across
    several periods: notary registration, header addition, vote counting,
    deregistration, and release.
    """
    w3 = smc_handler.web3
    config = smc_testing_config
    # Tracker observes shard 0 only; activity on other shards must not
    # affect its answers.
    shard_tracker = ShardTracker(
        w3=w3,
        config=config,
        shard_id=0,
        smc_handler_address=smc_handler.address,
    )

    # Register nine notaries
    batch_register(smc_handler, 0, 8)
    # Check that registration log was/was not emitted accordingly
    assert shard_tracker.is_notary_registered(notary=NotaryAccount(0).checksum_address)
    assert shard_tracker.is_notary_registered(notary=NotaryAccount(5).checksum_address)
    assert not shard_tracker.is_notary_registered(notary=NotaryAccount(9).checksum_address)
    fast_forward(smc_handler, 1)

    # Check that add header log has not been emitted yet
    current_period = w3.eth.blockNumber // config['PERIOD_LENGTH']
    assert not shard_tracker.is_new_header_added(period=current_period)
    # Add header in multiple shards
    CHUNK_ROOT_1_0 = b'\x10' * 32
    smc_handler.add_header(
        shard_id=0,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        private_key=NotaryAccount(0).private_key,
    )
    CHUNK_ROOT_1_7 = b'\x17' * 32
    smc_handler.add_header(
        shard_id=7,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_7,
        private_key=NotaryAccount(7).private_key,
    )
    CHUNK_ROOT_1_3 = b'\x13' * 32
    smc_handler.add_header(
        shard_id=3,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_3,
        private_key=NotaryAccount(3).private_key,
    )
    mine(w3, 1)
    # Check that add header log was successfully emitted
    assert shard_tracker.is_new_header_added(period=current_period)

    # Check that there has not been enough votes yet in shard 0
    assert not shard_tracker.has_enough_vote(period=current_period)
    # Submit three votes in shard 0 and one vote in shard 7
    for sample_index in range(3):
        # Each vote must come from the notary sampled at that committee slot.
        pool_index = sampling(smc_handler, 0)[sample_index]
        smc_handler.submit_vote(
            shard_id=0,
            period=current_period,
            chunk_root=CHUNK_ROOT_1_0,
            index=sample_index,
            private_key=NotaryAccount(pool_index).private_key,
        )
        mine(w3, 1)
    sample_index = 0
    pool_index = sampling(smc_handler, 7)[sample_index]
    smc_handler.submit_vote(
        shard_id=7,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_7,
        index=sample_index,
        private_key=NotaryAccount(pool_index).private_key,
    )
    mine(w3, 1)
    # Check that there has not been enough votes yet in shard 0
    # Only three votes in shard 0 while four is required
    assert not shard_tracker.has_enough_vote(period=current_period)
    # Cast the fourth vote
    sample_index = 3
    pool_index = sampling(smc_handler, 0)[sample_index]
    smc_handler.submit_vote(
        shard_id=0,
        period=current_period,
        chunk_root=CHUNK_ROOT_1_0,
        index=sample_index,
        private_key=NotaryAccount(pool_index).private_key,
    )
    mine(w3, 1)
    # Check that there are enough votes now in shard 0
    assert shard_tracker.has_enough_vote(period=current_period)
    # Proceed to next period
    fast_forward(smc_handler, 1)

    # Go back and check the status of header and vote counts in last period
    current_period = w3.eth.blockNumber // config['PERIOD_LENGTH']
    assert shard_tracker.is_new_header_added(period=(current_period - 1))
    assert shard_tracker.has_enough_vote(period=(current_period - 1))

    # Deregister
    smc_handler.deregister_notary(private_key=NotaryAccount(0).private_key)
    mine(w3, 1)
    # Check that deregistration log was/was not emitted accordingly
    assert shard_tracker.is_notary_deregistered(NotaryAccount(0).checksum_address)
    assert not shard_tracker.is_notary_deregistered(NotaryAccount(5).checksum_address)

    # Fast forward to end of lock up
    fast_forward(smc_handler, smc_handler.config['NOTARY_LOCKUP_LENGTH'] + 1)
    # Release
    smc_handler.release_notary(private_key=NotaryAccount(0).private_key)
    mine(w3, 1)
    # Check that log was successfully emitted
    assert shard_tracker.is_notary_released(NotaryAccount(0).checksum_address)


================================================
FILE: tests/handler/test_smc_handler.py
================================================
import logging

import pytest

from sharding.handler.utils.smc_handler_utils import (
    make_call_context,
    make_transaction_context,
)


# 20-byte all-zero address, used as a dummy sender in call contexts.
ZERO_ADDR = b'\x00' * 20

# NOTE(review): logger name references an 'evm.chain...' module path that does
# not match this repo's package layout — presumably historical; confirm.
logger = logging.getLogger('evm.chain.sharding.mainchain_handler.SMC')


def test_make_transaction_context():
    """make_transaction_context must include nonce/gas/chainId fields and
    raise ValueError when nonce or gas is missing.
    """
    transaction_context = make_transaction_context(
        nonce=0,
        gas=10000,
    )
    assert 'nonce' in transaction_context
    assert 'gas' in transaction_context
    assert 'chainId' in transaction_context
    # The calls below are expected to raise, so binding their (never produced)
    # return values was dead code; drop the useless assignments.
    with pytest.raises(ValueError):
        make_transaction_context(
            nonce=None,
            gas=10000,
        )
    with pytest.raises(ValueError):
        make_transaction_context(
            nonce=0,
            gas=None,
        )


def test_make_call_context():
    """make_call_context must include from/gas fields and raise ValueError
    when the sender address is missing.
    """
    call_context = make_call_context(
        sender_address=ZERO_ADDR,
        gas=1000,
    )
    assert 'from' in call_context
    assert 'gas' in call_context
    # Expected to raise, so the old unused re-assignment of `call_context`
    # inside the raises-block was dead code; call without binding.
    with pytest.raises(ValueError):
        make_call_context(
            sender_address=None,
            gas=1000,
        )


================================================
FILE: tests/handler/utils/__init__.py
================================================


================================================
FILE: tests/handler/utils/config.py
================================================
from cytoolz import (
    merge,
)

from sharding.contracts.utils.config import (
    get_sharding_config,
)


def get_sharding_testing_config():
    """Return the sharding config with small, test-friendly parameter
    overrides (fewer shards, shorter periods, smaller committees).
    """
    overrides = {
        'SHARD_COUNT': 10,
        'PERIOD_LENGTH': 10,
        'COMMITTEE_SIZE': 6,
        'QUORUM_SIZE': 4,
        'NOTARY_LOCKUP_LENGTH': 30,
    }
    # Later arguments to merge() win, so the overrides shadow the defaults.
    return merge(get_sharding_config(), overrides)


================================================
FILE: tools/vyper_compile_script.py
================================================
import argparse
import json
import os

from vyper import compiler


def generate_compiled_json(file_path: str) -> None:
    """Compile the Vyper contract at ``file_path`` and write ``<name>.json``
    (its ABI and hex bytecode) next to the source file.

    :param file_path: path to the Vyper source file.
    """
    # Context manager guarantees the handle is closed (the previous
    # `open(file_path).read()` leaked it).
    with open(file_path) as source_file:
        vmc_code = source_file.read()
    abi = compiler.mk_full_signature(vmc_code)
    bytecode = compiler.compile(vmc_code)
    bytecode_hex = '0x' + bytecode.hex()
    contract_json = {
        'abi': abi,
        'bytecode': bytecode_hex,
    }
    # Output name is the first dotted component of the source name, e.g.
    # "sharding_manager.v.py" -> "sharding_manager.json".
    basename = os.path.basename(file_path)
    dirname = os.path.dirname(file_path)
    contract_name = basename.split('.')[0]
    # os.path.join fixes the old `dirname + "/..."` concatenation, which wrote
    # to the filesystem root ("/<name>.json") when file_path had no directory
    # component (dirname == "").
    output_path = os.path.join(dirname, "{}.json".format(contract_name))
    with open(output_path, 'w') as f_write:
        json.dump(contract_json, f_write)


def main() -> None:
    """CLI entry point: compile the Vyper contract given as the sole
    positional argument and write its JSON artifact alongside it.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("path", type=str, help="the path of the contract")
    parsed = arg_parser.parse_args()
    generate_compiled_json(parsed.path)


if __name__ == '__main__':
    main()


================================================
FILE: tox.ini
================================================
[tox]
envlist=
    py{35,36}-{contract,handler}
    lint{35,36}

[flake8]
max-line-length= 100
exclude=
ignore=

[testenv]
usedevelop=True
passenv =
    PYTEST_ADDOPTS
    TRAVIS_EVENT_TYPE
commands=
    contract: py.test {posargs:tests/contract/}
    handler: py.test {posargs:tests/handler/}
extras =
    coincurve
deps = -r{toxinidir}/requirements-dev.txt
basepython =
    py35: python3.5
    py36: python3.6

[testenv:lint35]
basepython=python3.5
setenv=MYPYPATH={toxinidir}:{toxinidir}/stubs
commands=
    flake8 {toxinidir}/sharding --exclude="{toxinidir}/sharding/contracts/*.v.py"
    flake8 {toxinidir}/tests
    mypy --follow-imports=silent --ignore-missing-imports --disallow-incomplete-defs --disallow-untyped-defs sharding tools

[testenv:lint36]
basepython=python3.6
setenv=MYPYPATH={toxinidir}:{toxinidir}/stubs
commands=
    flake8 {toxinidir}/sharding --exclude="{toxinidir}/sharding/contracts/*.v.py"
    flake8 {toxinidir}/tests
    mypy --follow-imports=silent --ignore-missing-imports --disallow-incomplete-defs --disallow-untyped-defs sharding tools
Download .txt
gitextract_jgzbq6ot/

├── .bumpversion.cfg
├── .github/
│   ├── ISSUE_TEMPLATE.md
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .travis.yml
├── MANIFEST.in
├── Makefile
├── README.md
├── docs/
│   ├── doc.html
│   └── doc.md
├── requirements-dev.txt
├── requirements.txt
├── setup.py
├── sharding/
│   ├── __init__.py
│   ├── contracts/
│   │   ├── __init__.py
│   │   ├── sharding_manager.json
│   │   ├── sharding_manager.v.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── config.py
│   │       └── smc_utils.py
│   └── handler/
│       ├── __init__.py
│       ├── exceptions.py
│       ├── log_handler.py
│       ├── shard_tracker.py
│       ├── smc_handler.py
│       └── utils/
│           ├── __init__.py
│           ├── log_parser.py
│           ├── shard_tracker_utils.py
│           ├── smc_handler_utils.py
│           └── web3_utils.py
├── tests/
│   ├── __init__.py
│   ├── conftest.py
│   ├── contract/
│   │   ├── __init__.py
│   │   ├── test_add_header.py
│   │   ├── test_compile.py
│   │   ├── test_log_emission.py
│   │   ├── test_notary_sample.py
│   │   ├── test_registry_management.py
│   │   ├── test_submit_vote.py
│   │   └── utils/
│   │       ├── common_utils.py
│   │       ├── notary_account.py
│   │       └── sample_helper.py
│   └── handler/
│       ├── __init__.py
│       ├── test_log_handler.py
│       ├── test_shard_tracker.py
│       ├── test_smc_handler.py
│       └── utils/
│           ├── __init__.py
│           └── config.py
├── tools/
│   └── vyper_compile_script.py
└── tox.ini
Download .txt
SYMBOL INDEX (139 symbols across 26 files)

FILE: sharding/contracts/sharding_manager.v.py
  function __init__ (line 112) | def __init__(
  function is_empty_slots_stack_empty (line 132) | def is_empty_slots_stack_empty() -> bool:
  function empty_slots_stack_push (line 138) | def empty_slots_stack_push(index: int128):
  function empty_slots_stack_pop (line 145) | def empty_slots_stack_pop() -> int128:
  function get_notary_info (line 155) | def get_notary_info(notary_address: address) -> (int128, int128):
  function update_notary_sample_size (line 161) | def update_notary_sample_size() -> bool:
  function register_notary (line 176) | def register_notary() -> bool:
  function deregister_notary (line 210) | def deregister_notary() -> bool:
  function release_notary (line 232) | def release_notary() -> bool:
  function get_member_of_committee (line 257) | def get_member_of_committee(
  function add_header (line 293) | def add_header(
  function get_vote_count (line 336) | def get_vote_count(shard_id: int128) -> int128:
  function has_notary_voted (line 352) | def has_notary_voted(shard_id: int128, index: int128) -> bool:
  function update_vote (line 364) | def update_vote(shard_id: int128, index: int128) -> bool:
  function submit_vote (line 382) | def submit_vote(

FILE: sharding/contracts/utils/config.py
  function get_sharding_config (line 14) | def get_sharding_config() -> Dict[str, Any]:

FILE: sharding/contracts/utils/smc_utils.py
  function get_smc_source_code (line 13) | def get_smc_source_code() -> str:
  function get_smc_json (line 19) | def get_smc_json() -> Dict[str, Any]:

FILE: sharding/handler/exceptions.py
  class LogParsingError (line 1) | class LogParsingError(Exception):

FILE: sharding/handler/log_handler.py
  class LogHandler (line 18) | class LogHandler:
    method __init__ (line 22) | def __init__(self, w3: Web3, period_length: int) -> None:
    method get_logs (line 26) | def get_logs(self,

FILE: sharding/handler/shard_tracker.py
  class ShardTracker (line 35) | class ShardTracker:
    method __init__ (line 39) | def __init__(self,
    method _get_logs_by_shard_id (line 52) | def _get_logs_by_shard_id(self,
    method _get_logs_by_notary (line 68) | def _get_logs_by_notary(self,
    method _decide_period_block_number (line 89) | def _decide_period_block_number(self,
    method get_register_notary_logs (line 109) | def get_register_notary_logs(self,
    method get_deregister_notary_logs (line 123) | def get_deregister_notary_logs(self,
    method get_release_notary_logs (line 138) | def get_release_notary_logs(self,
    method get_add_header_logs (line 153) | def get_add_header_logs(self,
    method get_submit_vote_logs (line 167) | def get_submit_vote_logs(self,
    method is_notary_registered (line 183) | def is_notary_registered(self, notary: str, from_period: int=None) -> ...
    method is_notary_deregistered (line 193) | def is_notary_deregistered(self, notary: str, from_period: int=None) -...
    method is_notary_released (line 203) | def is_notary_released(self, notary: str, from_period: int=None) -> bool:
    method is_new_header_added (line 213) | def is_new_header_added(self, period: int) -> bool:
    method has_enough_vote (line 222) | def has_enough_vote(self, period: int) -> bool:

FILE: sharding/handler/smc_handler.py
  class SMC (line 38) | class SMC(Contract):
    method __init__ (line 54) | def __init__(self,
    method basic_call_context (line 69) | def basic_call_context(self) -> Dict[str, Any]:
    method does_notary_exist (line 77) | def does_notary_exist(self, notary_address: Address) -> bool:
    method get_notary_info (line 80) | def get_notary_info(self, notary_address: Address) -> Tuple[int, int]:
    method notary_pool_len (line 83) | def notary_pool_len(self) -> int:
    method notary_pool (line 86) | def notary_pool(self, pool_index: int) -> List[Address]:
    method empty_slots_stack_top (line 90) | def empty_slots_stack_top(self) -> int:
    method empty_slots_stack (line 93) | def empty_slots_stack(self, stack_index: int) -> List[int]:
    method current_period_notary_sample_size (line 96) | def current_period_notary_sample_size(self) -> int:
    method next_period_notary_sample_size (line 99) | def next_period_notary_sample_size(self) -> int:
    method notary_sample_size_updated_period (line 102) | def notary_sample_size_updated_period(self) -> int:
    method records_updated_period (line 105) | def records_updated_period(self, shard_id: int) -> int:
    method head_collation_period (line 108) | def head_collation_period(self, shard_id: int) -> int:
    method get_member_of_committee (line 111) | def get_member_of_committee(self, shard_id: int, index: int) -> Address:
    method get_collation_chunk_root (line 118) | def get_collation_chunk_root(self, shard_id: int, period: int) -> Hash32:
    method get_collation_proposer (line 124) | def get_collation_proposer(self, shard_id: int, period: int) -> Address:
    method get_collation_is_elected (line 131) | def get_collation_is_elected(self, shard_id: int, period: int) -> bool:
    method current_vote (line 137) | def current_vote(self, shard_id: int) -> bytes:
    method get_vote_count (line 142) | def get_vote_count(self, shard_id: int) -> int:
    method has_notary_voted (line 147) | def has_notary_voted(self, shard_id: int, index: int) -> bool:
    method _send_transaction (line 153) | def _send_transaction(self,
    method register_notary (line 192) | def register_notary(self,
    method deregister_notary (line 206) | def deregister_notary(self,
    method release_notary (line 219) | def release_notary(self,
    method add_header (line 232) | def add_header(self,
    method submit_vote (line 253) | def submit_vote(self,

FILE: sharding/handler/utils/log_parser.py
  class LogParser (line 26) | class LogParser(object):
    method __init__ (line 27) | def __init__(self, *, event_name: str, log: Dict[str, Any]) -> None:
    method _extract_event_abi (line 41) | def _extract_event_abi(self, *, event_name: str) -> Dict[str, Any]:
    method _set_topic_value (line 47) | def _set_topic_value(self, *, topics: List[Tuple[str, Any]], log: Dict...
    method _set_data_value (line 57) | def _set_data_value(self, *, data: List[Tuple[str, Any]], log: Dict[st...
    method _parse_value (line 68) | def _parse_value(self, *, val_type: str, val: bytes) -> Union[bool, Ad...

FILE: sharding/handler/utils/shard_tracker_utils.py
  function to_log_topic_address (line 18) | def to_log_topic_address(address: Union[Address, str]) -> str:
  function get_event_signature_from_abi (line 22) | def get_event_signature_from_abi(event_name: str) -> bytes:

FILE: sharding/handler/utils/smc_handler_utils.py
  function make_call_context (line 18) | def make_call_context(sender_address: Address,
  function make_transaction_context (line 41) | def make_transaction_context(nonce: int,

FILE: sharding/handler/utils/web3_utils.py
  function get_code (line 24) | def get_code(w3: Web3, address: Address) -> bytes:
  function get_nonce (line 28) | def get_nonce(w3: Web3, address: Address) -> int:
  function take_snapshot (line 32) | def take_snapshot(w3: Web3) -> int:
  function revert_to_snapshot (line 36) | def revert_to_snapshot(w3: Web3, snapshot_id: int) -> None:
  function mine (line 40) | def mine(w3: Web3, num_blocks: int) -> None:
  function send_raw_transaction (line 44) | def send_raw_transaction(w3: Web3, raw_transaction: BaseTransaction) -> ...
  function get_recent_block_hashes (line 51) | def get_recent_block_hashes(w3: Web3, history_size: int) -> Tuple[Hash32...
  function get_canonical_chain (line 65) | def get_canonical_chain(w3: Web3,

FILE: tests/conftest.py
  function smc_testing_config (line 31) | def smc_testing_config():
  function smc_handler (line 36) | def smc_handler(smc_testing_config):

FILE: tests/contract/test_add_header.py
  function test_normal_add_header (line 14) | def test_normal_add_header(smc_handler):  # noqa: F811
  function test_add_header_wrong_period (line 69) | def test_add_header_wrong_period(smc_handler):  # noqa: F811
  function test_add_header_wrong_shard (line 121) | def test_add_header_wrong_shard(smc_handler):  # noqa: F811
  function test_double_add_header (line 174) | def test_double_add_header(smc_handler):  # noqa: F811

FILE: tests/contract/test_compile.py
  function test_compile_smc (line 9) | def test_compile_smc():

FILE: tests/contract/test_log_emission.py
  function test_log_emission (line 19) | def test_log_emission(smc_handler):  # noqa: F811

FILE: tests/contract/test_notary_sample.py
  function test_normal_update_notary_sample_size (line 20) | def test_normal_update_notary_sample_size(smc_handler):  # noqa: F811
  function test_register_then_deregister (line 84) | def test_register_then_deregister(smc_handler):  # noqa: F811
  function test_deregister_then_register (line 107) | def test_deregister_then_register(smc_handler):  # noqa: F811
  function test_series_of_deregister_starting_from_top_of_the_stack (line 139) | def test_series_of_deregister_starting_from_top_of_the_stack(smc_handler...
  function test_series_of_deregister_starting_from_bottom_of_the_stack (line 186) | def test_series_of_deregister_starting_from_bottom_of_the_stack(smc_hand...
  function test_get_member_of_committee_without_updating_sample_size (line 239) | def test_get_member_of_committee_without_updating_sample_size(smc_handle...
  function test_get_member_of_committee_with_updated_sample_size (line 271) | def test_get_member_of_committee_with_updated_sample_size(smc_handler): ...
  function test_committee_lists_generated_are_different (line 294) | def test_committee_lists_generated_are_different(smc_handler):  # noqa: ...
  function test_get_member_of_committee_with_non_member (line 316) | def test_get_member_of_committee_with_non_member(smc_handler):  # noqa: ...
  function test_committee_change_with_deregister_then_register (line 334) | def test_committee_change_with_deregister_then_register(smc_handler):  #...
  function test_get_sample_result (line 360) | def test_get_sample_result(smc_handler):  # noqa: F811

FILE: tests/contract/test_registry_management.py
  function test_normal_register (line 14) | def test_normal_register(smc_handler):  # noqa: F811
  function test_register_without_enough_ether (line 58) | def test_register_without_enough_ether(smc_handler):  # noqa: F811
  function test_double_register (line 83) | def test_double_register(smc_handler):  # noqa: F811
  function test_normal_deregister (line 106) | def test_normal_deregister(smc_handler):  # noqa: F811
  function test_deregister_then_register (line 136) | def test_deregister_then_register(smc_handler):  # noqa: F811
  function test_normal_release_notary (line 175) | def test_normal_release_notary(smc_handler):  # noqa: F811
  function test_instant_release_notary (line 207) | def test_instant_release_notary(smc_handler):  # noqa: F811
  function test_deregister_and_new_notary_register (line 239) | def test_deregister_and_new_notary_register(smc_handler):  # noqa: F811

FILE: tests/contract/test_submit_vote.py
  function test_normal_submit_vote (line 20) | def test_normal_submit_vote(smc_handler):  # noqa: F811
  function test_double_submit_vote (line 100) | def test_double_submit_vote(smc_handler):  # noqa: F811
  function test_submit_vote_by_notary_sampled_multiple_times (line 151) | def test_submit_vote_by_notary_sampled_multiple_times(smc_handler):  # n...
  function test_submit_vote_by_non_eligible_notary (line 197) | def test_submit_vote_by_non_eligible_notary(smc_handler):  # noqa: F811
  function test_submit_vote_without_add_header_first (line 237) | def test_submit_vote_without_add_header_first(smc_handler):  # noqa: F811
  function test_submit_vote_with_invalid_args (line 279) | def test_submit_vote_with_invalid_args(smc_handler, period, shard_id, ch...
  function test_submit_vote_then_deregister (line 314) | def test_submit_vote_then_deregister(smc_handler):  # noqa: F811

FILE: tests/contract/utils/common_utils.py
  function update_notary_sample_size (line 9) | def update_notary_sample_size(smc_handler):
  function batch_register (line 19) | def batch_register(smc_handler, start, end):
  function fast_forward (line 27) | def fast_forward(smc_handler, num_of_periods):

FILE: tests/contract/utils/notary_account.py
  class NotaryAccount (line 6) | class NotaryAccount:
    method __init__ (line 9) | def __init__(self, index):
    method private_key (line 13) | def private_key(self):
    method checksum_address (line 17) | def checksum_address(self):
    method canonical_address (line 21) | def canonical_address(self):

FILE: tests/contract/utils/sample_helper.py
  function get_notary_pool_list (line 13) | def get_notary_pool_list(smc_handler):
  function sampling (line 22) | def sampling(smc_handler, shard_id):
  function get_committee_list (line 54) | def get_committee_list(smc_handler, shard_id):
  function get_sample_result (line 63) | def get_sample_result(smc_handler, notary_index):

FILE: tests/handler/test_log_handler.py
  function contract (line 61) | def contract():
  function test_get_logs_without_forks (line 75) | def test_get_logs_without_forks(contract, smc_testing_config):
  function test_get_logs_with_forks (line 105) | def test_get_logs_with_forks(contract, smc_testing_config):

FILE: tests/handler/test_shard_tracker.py
  function test_normal_log_parser (line 110) | def test_normal_log_parser(raw_log, event_name, attr_tuples):
  function test_log_parser_with_wrong_log_content (line 146) | def test_log_parser_with_wrong_log_content(raw_log, event_name):
  function test_status_checking_functions (line 151) | def test_status_checking_functions(smc_handler, smc_testing_config):  # ...

FILE: tests/handler/test_smc_handler.py
  function test_make_transaction_context (line 16) | def test_make_transaction_context():
  function test_make_call_context (line 36) | def test_make_call_context():

FILE: tests/handler/utils/config.py
  function get_sharding_testing_config (line 10) | def get_sharding_testing_config():

FILE: tools/vyper_compile_script.py
  function generate_compiled_json (line 8) | def generate_compiled_json(file_path: str) -> None:
  function main (line 25) | def main() -> None:
Condensed preview — 50 files, each entry showing its path, character count, and a content snippet. Download the .json file, or copy the output, to get the full structured content (199K chars).
[
  {
    "path": ".bumpversion.cfg",
    "chars": 475,
    "preview": "[bumpversion]\ncurrent_version = 0.0.2-alpha.2\ncommit = True\ntag = True\nparse = (?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch"
  },
  {
    "path": ".github/ISSUE_TEMPLATE.md",
    "chars": 461,
    "preview": "* OS: osx/linux/win\n* Environment (output of `pip freeze`):\n    * Python version\n    * Vyper version\n    * py-evm versio"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "chars": 133,
    "preview": "### What was wrong?\n\n\n\n### How was it fixed?\n\n\n\n#### Cute Animal Picture\n\n![put a cute animal picture link inside the pa"
  },
  {
    "path": ".gitignore",
    "chars": 217,
    "preview": "*.py[co]\n__pycache__/\n*~\n[#]*[#]\n.*.swp\n.*.swo\n.*.swn\n.~\n.DS_Store\n/tmp/\n/.venv/\n/dist/\n/*.egg-info/\n/.tox/\n/bin/\n/devel"
  },
  {
    "path": ".travis.yml",
    "chars": 758,
    "preview": "sudo: false\nlanguage: python\ndist: trusty\nenv:\n  global:\n    - PYTEST_ADDOPTS=\"-n 2 --durations 50 --maxfail 50\"\nmatrix:"
  },
  {
    "path": "MANIFEST.in",
    "chars": 122,
    "preview": "include README.md\ninclude requirements.txt\ninclude requirements-dev.txt\n\ninclude sharding/contracts/sharding_manager.jso"
  },
  {
    "path": "Makefile",
    "chars": 1404,
    "preview": "# Variables\n# compile-smc parameters\ncompile_script = tools/vyper_compile_script.py\ncontract = sharding/contracts/shardi"
  },
  {
    "path": "README.md",
    "chars": 312,
    "preview": "# Sharding\n\n### Sharding Implementation\nRefer [Py-EVM](https://github.com/ethereum/py-evm) for the latest implementation"
  },
  {
    "path": "docs/doc.html",
    "chars": 12007,
    "preview": "<h3>Preliminaries</h3>\n\n<p>We assume that at address <code>VALIDATOR_MANAGER_ADDRESS</code> (on the existing \"main shard"
  },
  {
    "path": "docs/doc.md",
    "chars": 26067,
    "preview": "## Introduction\n\nThe purpose of this document is to provide a reasonably complete specification and introduction for any"
  },
  {
    "path": "requirements-dev.txt",
    "chars": 291,
    "preview": "bumpversion>=0.5.3,<1\nflake8==3.5.0\nmypy==0.600\nhypothesis==3.44.26\npytest==3.6.0\npytest-asyncio==0.8.0\npytest-cov==2.5."
  },
  {
    "path": "requirements.txt",
    "chars": 118,
    "preview": "cytoolz>=0.9.0,<1.0.0\neth-utils>=1.0.3,<2.0.0\nrlp>=1.0.0,<2.0.0\nweb3>=4.1.0,<5.0.0\npy-evm==0.2.0a18\neth-typing==1.0.0\n"
  },
  {
    "path": "setup.py",
    "chars": 1067,
    "preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\n\n# requirements\nINSTALL_REQUI"
  },
  {
    "path": "sharding/__init__.py",
    "chars": 420,
    "preview": "import pkg_resources\n\nfrom sharding.contracts.utils.smc_utils import (  # noqa: F401\n    get_smc_source_code,\n    get_sm"
  },
  {
    "path": "sharding/contracts/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "sharding/contracts/sharding_manager.json",
    "chars": 21111,
    "preview": "{\"abi\": [{\"name\": \"RegisterNotary\", \"inputs\": [{\"type\": \"int128\", \"name\": \"index_in_notary_pool\", \"indexed\": false}, {\"t"
  },
  {
    "path": "sharding/contracts/sharding_manager.v.py",
    "chars": 14334,
    "preview": "# NOTE: Some variables are set as public variables for testing. They should be reset\n# to private variables in an offici"
  },
  {
    "path": "sharding/contracts/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "sharding/contracts/utils/config.py",
    "chars": 1116,
    "preview": "from typing import (\n    Any,\n    Dict,\n)\nfrom eth_utils import (\n    to_wei,\n)\n\nfrom evm.utils import (\n    env,\n)\n\n\nde"
  },
  {
    "path": "sharding/contracts/utils/smc_utils.py",
    "chars": 449,
    "preview": "import json\nimport os\n\nfrom typing import (\n    Any,\n    Dict,\n)\n\n\nDIR = os.path.dirname(__file__)\n\n\ndef get_smc_source_"
  },
  {
    "path": "sharding/handler/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "sharding/handler/exceptions.py",
    "chars": 43,
    "preview": "class LogParsingError(Exception):\n    pass\n"
  },
  {
    "path": "sharding/handler/log_handler.py",
    "chars": 1641,
    "preview": "import logging\nfrom typing import (\n    Any,\n    Dict,\n    List,\n    Union,\n)\n\nfrom evm.exceptions import BlockNotFound\n"
  },
  {
    "path": "sharding/handler/shard_tracker.py",
    "chars": 8020,
    "preview": "from web3 import Web3\n\nfrom typing import (\n    Any,\n    Dict,\n    Generator,\n    List,\n    Optional,\n    Union,\n    Tup"
  },
  {
    "path": "sharding/handler/smc_handler.py",
    "chars": 9013,
    "preview": "import logging\nfrom typing import (\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Tuple,\n)\n\nfrom web3.contract import ("
  },
  {
    "path": "sharding/handler/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "sharding/handler/utils/log_parser.py",
    "chars": 2781,
    "preview": "from typing import (\n    Any,\n    Dict,\n    List,\n    Tuple,\n    Union,\n)\n\nfrom eth_utils import (\n    to_canonical_addr"
  },
  {
    "path": "sharding/handler/utils/shard_tracker_utils.py",
    "chars": 659,
    "preview": "from typing import (\n    Union,\n)\n\nfrom eth_utils import (\n    event_abi_to_log_topic,\n    to_checksum_address,\n)\nfrom e"
  },
  {
    "path": "sharding/handler/utils/smc_handler_utils.py",
    "chars": 1809,
    "preview": "from typing import (\n    Any,\n    Generator,\n    Tuple,\n)\n\nfrom eth_utils import (\n    is_address,\n    to_checksum_addre"
  },
  {
    "path": "sharding/handler/utils/web3_utils.py",
    "chars": 2295,
    "preview": "import rlp\n\nfrom evm.rlp.transactions import (\n    BaseTransaction,\n)\nfrom web3 import (\n    Web3,\n)\n\nfrom eth_utils imp"
  },
  {
    "path": "tests/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tests/conftest.py",
    "chars": 2034,
    "preview": "import pytest\n\nfrom web3 import (\n    Web3,\n)\n\nfrom web3.providers.eth_tester import (\n    EthereumTesterProvider,\n)\n\nfr"
  },
  {
    "path": "tests/contract/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tests/contract/test_add_header.py",
    "chars": 8205,
    "preview": "from sharding.handler.utils.web3_utils import (\n    mine,\n)\n\nfrom tests.contract.utils.common_utils import (\n    batch_r"
  },
  {
    "path": "tests/contract/test_compile.py",
    "chars": 456,
    "preview": "from vyper import compiler\n\nfrom sharding.contracts.utils.smc_utils import (\n    get_smc_json,\n    get_smc_source_code,\n"
  },
  {
    "path": "tests/contract/test_log_emission.py",
    "chars": 3173,
    "preview": "from sharding.handler.shard_tracker import (  # noqa: F401\n    ShardTracker,\n)\nfrom sharding.handler.utils.web3_utils im"
  },
  {
    "path": "tests/contract/test_notary_sample.py",
    "chars": 15421,
    "preview": "from sharding.handler.utils.web3_utils import (\n    mine,\n)\n\nfrom tests.contract.utils.common_utils import (\n    update_"
  },
  {
    "path": "tests/contract/test_registry_management.py",
    "chars": 9849,
    "preview": "from sharding.handler.utils.web3_utils import (\n    mine,\n)\n\nfrom tests.contract.utils.common_utils import (\n    batch_r"
  },
  {
    "path": "tests/contract/test_submit_vote.py",
    "chars": 12993,
    "preview": "import pytest\n\nfrom sharding.handler.utils.web3_utils import (\n    mine,\n)\n\nfrom tests.contract.utils.common_utils impor"
  },
  {
    "path": "tests/contract/utils/common_utils.py",
    "chars": 1063,
    "preview": "from sharding.handler.utils.web3_utils import (\n    mine,\n)\nfrom tests.contract.utils.notary_account import (\n    Notary"
  },
  {
    "path": "tests/contract/utils/notary_account.py",
    "chars": 498,
    "preview": "from eth_tester.backends.pyevm.main import (\n    get_default_account_keys,\n)\n\n\nclass NotaryAccount:\n    index = None\n\n  "
  },
  {
    "path": "tests/contract/utils/sample_helper.py",
    "chars": 2817,
    "preview": "from eth_utils import (\n    to_list,\n    keccak,\n    big_endian_to_int,\n)\n\nfrom evm.utils.numeric import (\n    int_to_by"
  },
  {
    "path": "tests/handler/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tests/handler/test_log_handler.py",
    "chars": 4485,
    "preview": "import itertools\n\nimport pytest\n\nfrom cytoolz.dicttoolz import (\n    assoc,\n)\n\nfrom web3 import (\n    Web3,\n)\n\nfrom web3"
  },
  {
    "path": "tests/handler/test_shard_tracker.py",
    "chars": 17535,
    "preview": "import logging\n\nimport pytest\n\nfrom sharding.handler.exceptions import (\n    LogParsingError,\n)\nfrom sharding.handler.ut"
  },
  {
    "path": "tests/handler/test_smc_handler.py",
    "chars": 1131,
    "preview": "import logging\n\nimport pytest\n\nfrom sharding.handler.utils.smc_handler_utils import (\n    make_call_context,\n    make_tr"
  },
  {
    "path": "tests/handler/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tests/handler/utils/config.py",
    "chars": 411,
    "preview": "from cytoolz import (\n    merge,\n)\n\nfrom sharding.contracts.utils.config import (\n    get_sharding_config,\n)\n\n\ndef get_s"
  },
  {
    "path": "tools/vyper_compile_script.py",
    "chars": 892,
    "preview": "import argparse\nimport json\nimport os\n\nfrom vyper import compiler\n\n\ndef generate_compiled_json(file_path: str) -> None:\n"
  },
  {
    "path": "tox.ini",
    "chars": 1072,
    "preview": "[tox]\nenvlist=\n    py{35,36}-{contract,handler}\n    lint{35,36}\n\n[flake8]\nmax-line-length= 100\nexclude=\nignore=\n\n[testen"
  }
]

About this extraction

This page contains the full source code of the ethereum/sharding GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 50 files (184.7 KB, approximately 54.3k tokens) and a symbol index of 139 extracted functions, classes, methods, constants, and types. Use this output with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!