### License
This project is licensed under the GNU Affero General Public License v3.0.
The complete license text can be accessed in the repository at [LICENSE](https://github.com/Zaczero/CBBI/blob/main/LICENSE).
================================================
FILE: api/cbbiinfo_api.py
================================================
import polars as pl
from utils import HTTP
def cbbi_fetch(key: str):
    """Fetch a single CBBI.info metric series identified by ``key``.

    Returns a DataFrame with a UTC ``Date`` column (microsecond precision,
    parsed from epoch-second keys) and a Float64 ``Value`` column.
    """
    response = HTTP.get('https://colintalkscrypto.com/cbbi/data/latest.json')
    response.raise_for_status()
    series = response.json()[key]
    # JSON object keys are epoch-second strings; values are the metric floats.
    timestamps = pl.Series([int(ts) for ts in series], dtype=pl.Int64)
    values = pl.Series(list(series.values()), dtype=pl.Float64)
    date_expr = (
        pl.from_epoch(pl.col('Date'), time_unit='s')
        .dt.cast_time_unit('us')
        .dt.replace_time_zone('UTC')
    )
    df = pl.DataFrame({'Date': timestamps, 'Value': values})
    return df.with_columns(Date=date_expr).select('Date', 'Value')
================================================
FILE: api/coinmetrics_api.py
================================================
import polars as pl
from utils import HTTP
def cm_fetch_asset_metrics(
    *,
    asset: str,
    metrics: list[str],
    frequency: str = '1d',
    start_time: str,
    page_size: int = 10_000,
    null_as_zero: bool = False,
):
    """
    Fetch Coin Metrics Community time series asset metrics.

    Follows ``next_page_url`` pagination until all rows are collected.

    Args:
        asset: Asset ticker, e.g. ``'btc'``.
        metrics: Metric column names to request.
        frequency: Sampling frequency (default daily).
        start_time: Inclusive start of the requested range.
        page_size: Number of rows per page request.
        null_as_zero: Ask the API to encode nulls as zeros.

    Returns:
        DataFrame with a UTC ``Date`` column truncated to day precision and
        one Float64 column per requested metric present in the response.

    Raises:
        ValueError: If the API returns no data rows.

    Notes:
        - Coin Metrics returns numbers as strings. This helper casts metric columns to numeric.
    """
    response = HTTP.get(
        'https://community-api.coinmetrics.io/v4/timeseries/asset-metrics',
        params={
            'assets': asset,
            'metrics': ','.join(metrics),
            'frequency': frequency,
            'start_time': start_time,
            'paging_from': 'start',
            'page_size': page_size,
            'sort': 'time',
            'null_as_zero': str(null_as_zero).lower(),
        },
    )
    response.raise_for_status()
    response_json = response.json()
    data = response_json['data']
    while response_json.get('next_page_url'):
        response = HTTP.get(response_json['next_page_url'])
        response.raise_for_status()
        response_json = response.json()
        data.extend(response_json['data'])
    # Fail loudly with a clear message instead of an IndexError below.
    if not data:
        raise ValueError(f'Coin Metrics returned no data for asset {asset!r}')
    df = pl.from_dicts(data, infer_schema_length=None)
    # Cast against the inferred schema, not just the first row's keys:
    # a metric may be absent from row 0 yet present in later rows/pages.
    return (
        df
        .with_columns(
            *[pl.col(col).cast(pl.Float64) for col in metrics if col in df.columns],
            Date=pl.col('time').str.to_datetime(time_zone='UTC').dt.truncate('1d'),
        )
        .drop('time')
    )
================================================
FILE: api/coinsoto_api.py
================================================
import polars as pl
from utils import HTTP
def cs_fetch(path: str, data_selector: str, col_name: str):
    """Fetch a coinank indicator series.

    ``data_selector`` picks the y-series inside the payload. Returns a
    DataFrame with a UTC ``Date`` column (millisecond-epoch input, cast to
    microsecond precision) and a ``col_name`` value column.
    """
    response = HTTP.get(f'https://api.coinank.com/indicatorapi/{path}')
    response.raise_for_status()
    payload = response.json()['data']
    # Some endpoints nest the series one level deeper under 'line'.
    if 'timeList' not in payload and 'line' in payload:
        payload = payload['line']
    times = payload['timeList']
    values = payload[data_selector]
    date_expr = (
        pl.from_epoch(pl.col('Date'), time_unit='ms')
        .dt.cast_time_unit('us')
        .dt.replace_time_zone('UTC')
    )
    return (
        # The time axis can be longer than the value series; trim to match.
        pl.DataFrame({'Date': times[: len(values)], col_name: values})
        .with_columns(Date=date_expr)
        .select('Date', col_name)
    )
================================================
FILE: fetch_bitcoin_data.py
================================================
import polars as pl
from api.coinmetrics_api import cm_fetch_asset_metrics
from utils import mark_days_since, mark_highs_lows
def fetch_coinmetrics_data():
    """
    Fetch historical Bitcoin blockchain data from Coin Metrics Community API.

    Returns a DataFrame with columns: Date, IssTotUSD (daily issuance in USD),
    Price (USD), Halving (True on detected halving days).
    """
    df_cm = cm_fetch_asset_metrics(
        asset='btc',
        metrics=['BlkCnt', 'IssTotNtv', 'IssTotUSD', 'PriceUSD'],
        start_time='2009-01-03',  # Bitcoin genesis block date
    )
    # Rename API columns to the names used throughout the project.
    df = df_cm.select(
        Date=pl.col('Date'),
        TotalBlocks=pl.col('BlkCnt'),
        IssTotNtv=pl.col('IssTotNtv'),
        IssTotUSD=pl.col('IssTotUSD'),
        Price=pl.col('PriceUSD'),
    )
    # Average per-block subsidy for the day (native coins issued / blocks
    # mined); null when no blocks were mined that day.
    avg_subsidy = (
        pl
        .when(pl.col('TotalBlocks') > 0)
        .then(pl.col('IssTotNtv') / pl.col('TotalBlocks'))
        .otherwise(None)
    )
    # Snap the noisy daily average onto the halving schedule (50, 25, 12.5, …)
    # by rounding the implied halving count up to a whole number.
    subsidy_floor = pl.lit(50.0) / (
        pl.lit(2.0) ** (pl.lit(50.0) / avg_subsidy).log(base=2).ceil()
    )
    # A halving day is where the snapped subsidy changes versus the prior day
    # (both days must have a defined subsidy).
    df = df.with_columns(
        Halving=(
            subsidy_floor.is_not_null()
            & subsidy_floor.shift(1).is_not_null()
            & (subsidy_floor != subsidy_floor.shift(1))
        )
    )
    return df.select(
        'Date',
        'IssTotUSD',
        'Price',
        'Halving',
    )
def fetch_bitcoin_data():
    """
    Fetches historical Bitcoin data into a DataFrame.
    Very early data is discarded due to high volatility.
    """
    print('📈 Requesting historical Bitcoin data…')
    df = (
        fetch_coinmetrics_data()
        .with_columns(
            PuellMultiple=(
                pl.col('IssTotUSD')
                / pl.col('IssTotUSD').rolling_mean(window_size=365, min_samples=365)
            ),
            Price730DMA=pl.col('Price').rolling_mean(window_size=730, min_samples=1),
        )
        .filter(pl.col('Date') >= pl.datetime(2011, 6, 27, time_zone='UTC'))
        .with_columns(PriceLog=pl.col('Price').log())
    )
    df = mark_highs_lows(df, 'Price', False, 365 * 2, 180)
    # Relocate the 2021 cycle peak marker from the November top
    # to the first (April) top.
    first_top = pl.datetime(2021, 4, 14, time_zone='UTC')
    second_top = pl.datetime(2021, 11, 8, time_zone='UTC')
    df = df.with_columns(
        PriceHigh=(
            pl.when(pl.col('Date') == second_top)
            .then(False)
            .when(pl.col('Date') == first_top)
            .then(True)
            .otherwise(pl.col('PriceHigh'))
        )
    )
    return mark_days_since(df, ['PriceLow', 'Halving'])
================================================
FILE: main.py
================================================
import asyncio
import math
import time
import traceback
from pathlib import Path
import fire
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib import pyplot as plt
from pyfiglet import figlet_format
from sty import bg, ef, fg, rs
from tqdm import tqdm
from fetch_bitcoin_data import fetch_bitcoin_data
from metrics.mvrv_z_score import MVRVMetric
from metrics.pi_cycle import PiCycleMetric
from metrics.puell_multiple import PuellMetric
from metrics.reserve_risk import ReserveRiskMetric
from metrics.rhodl_ratio import RHODLMetric
from metrics.rupl import RUPLMetric
from metrics.trolololo import TrolololoMetric
from metrics.two_year_moving_average import TwoYearMovingAverageMetric
from metrics.woobull_topcap_cvdd import WoobullMetric
from utils import format_percentage, get_color
def get_metrics():
    """
    Returns a list of available metrics to be calculated.
    """
    metric_classes = (
        PiCycleMetric,
        RUPLMetric,
        RHODLMetric,
        PuellMetric,
        TwoYearMovingAverageMetric,
        TrolololoMetric,
        MVRVMetric,
        ReserveRiskMetric,
        WoobullMetric,
    )
    # Instantiate in declaration order; the order defines chart layout.
    return [metric_cls() for metric_cls in metric_classes]
def _json_number(value: float | None, *, precision: int = 4):
if value is None or (isinstance(value, float) and math.isnan(value)):
return 'null'
s = f'{value:.{precision}f}'.rstrip('0')
if s.endswith('.'):
s += '0'
return s
def _write_columns_orient_json(df: pl.DataFrame, path: Path, *, precision: int = 4):
    """Write ``df`` as hand-rolled columns-oriented JSON.

    Output shape: ``{"Col": {"<unix_ts>": <value>, …}, …}`` where timestamps
    come from the ``Date`` column (epoch seconds). Values are rendered via
    ``_json_number`` so NaN/None become ``null``.
    """
    timestamps = df.get_column('Date').dt.epoch(time_unit='s').to_list()
    value_columns = [name for name in df.columns if name != 'Date']
    last_col = len(value_columns) - 1
    last_row = len(timestamps) - 1
    with path.open('w', encoding='utf-8') as f:
        f.write('{\n')
        for col_i, name in enumerate(value_columns):
            f.write(f' "{name}":{{\n')
            column_values = df.get_column(name).to_list()
            for row_i, (t, v) in enumerate(zip(timestamps, column_values, strict=True)):
                comma = ',' if row_i < last_row else ''
                f.write(f' "{t}":{_json_number(v, precision=precision)}{comma}\n')
            col_comma = ',' if col_i < last_col else ''
            f.write(f' }}{col_comma}\n')
        f.write('}')
def _add_common_markers(
ax, *, halvings: np.ndarray, highs: np.ndarray, lows: np.ndarray
):
for dt in halvings:
ax.axvline(x=dt, color='navy', linestyle=':', linewidth=0.5)
for dt in highs:
ax.axvline(x=dt, color='green', linestyle=':', linewidth=0.5)
for dt in lows:
ax.axvline(x=dt, color='red', linestyle=':', linewidth=0.5)
def _shade_metric_bounds(ax):
ax.axhline(y=1, color='black', linewidth=0.5)
ax.axhline(y=0, color='black', linewidth=0.5)
y_min, y_max = ax.get_ylim()
ax.fill_betweenx(
y=[1, y_max],
x1=0,
x2=1,
transform=ax.get_yaxis_transform(),
color='black',
alpha=0.1,
edgecolor='none',
zorder=0,
)
ax.fill_betweenx(
y=[y_min, 0],
x1=0,
x2=1,
transform=ax.get_yaxis_transform(),
color='black',
alpha=0.1,
edgecolor='none',
zorder=0,
)
async def run(json_file: str, charts_file: str, output_dir: str | None):
    """Compute all CBBI metrics, render the summary charts and write the
    columns-oriented JSON output.

    Args:
        json_file: Output JSON file name, joined onto ``output_dir``.
        charts_file: Output charts image file name, joined onto ``output_dir``.
        output_dir: Target directory; ``None`` means current working directory.
    """
    output_dir_path = Path.cwd() if output_dir is None else Path(output_dir)
    json_file_path = output_dir_path / Path(json_file)
    charts_file_path = output_dir_path / Path(charts_file)
    output_dir_path.mkdir(mode=0o755, parents=True, exist_ok=True)
    df_bitcoin = fetch_bitcoin_data()
    current_price = df_bitcoin.get_column('Price')[-1]
    print(
        'Current Bitcoin price: '
        + ef.b
        + fg.li_green
        + bg.da_green
        + f' $ {round(current_price):,} '
        + rs.all
    )
    metrics = get_metrics()
    metrics_cols = []
    metrics_descriptions = []
    # Chart theme: small fonts and thin lines — many subplots share one figure.
    sns.set_theme(
        font_scale=0.225,
        rc={
            'figure.titlesize': 12,
            'axes.titlesize': 7.5,
            'axes.labelsize': 6,
            'xtick.labelsize': 4,
            'ytick.labelsize': 4,
            'lines.linewidth': 0.5,
            'grid.linewidth': 0.3,
            'savefig.dpi': 1000,
            'figure.dpi': 300,
        },
    )
    x = df_bitcoin.get_column('Date').to_numpy()
    price_log = df_bitcoin.get_column('PriceLog').to_numpy()
    # Min-max normalize log-price to [0, 1] for the background overlay.
    price = (price_log - price_log.min()) / (price_log.max() - price_log.min())
    halvings = x[df_bitcoin.get_column('Halving').to_numpy()]
    highs = x[df_bitcoin.get_column('PriceHigh').to_numpy()]
    lows = x[df_bitcoin.get_column('PriceLow').to_numpy()]
    # One row of two subplots per metric: input (left) → normalized output (right).
    axes_per_metric = 2
    fig, axes = plt.subplots(
        len(metrics), axes_per_metric, figsize=(4 * axes_per_metric, 3 * len(metrics))
    )
    axes = axes.reshape(-1, axes_per_metric)
    plt.tight_layout(pad=10)
    plt.subplots_adjust(top=0.98)
    plt.suptitle(
        'CBBI metric data input → output', fontsize=11.25, weight='bold', y=0.99508
    )
    for metric, ax_row in zip(metrics, axes, strict=True):
        ax_out = ax_row[1]
        ax_in = ax_row[0]
        # Faint normalized price in the background of the output chart.
        sns.lineplot(x=x, y=price, alpha=0.4, color='orange', ax=ax_out)
        values = await metric.calculate(df_bitcoin, [ax_out, ax_in])
        _add_common_markers(ax_out, halvings=halvings, highs=highs, lows=lows)
        _add_common_markers(ax_in, halvings=halvings, highs=highs, lows=lows)
        _shade_metric_bounds(ax_out)
        # Arrow between the input and output subplots of the row.
        ax_in.annotate(
            '',
            xy=(1.0967, 0.75),
            xycoords='axes fraction',
            xytext=(1.0367, 0.75),
            textcoords='axes fraction',
            arrowprops={
                'arrowstyle': '->',
                'color': 'darkgray',
                'lw': 1.5,
                'shrinkA': 0,
                'shrinkB': 0,
                'mutation_scale': 10,
            },
            ha='center',
            va='center',
        )
        # Clamp each metric into [0, 1] before it enters the confidence mean.
        values = values.clip(0, 1).rename(metric.name)
        df_bitcoin = df_bitcoin.with_columns(values)
        metrics_cols.append(metric.name)
        metrics_descriptions.append(metric.description)
    # Confidence = row-wise mean of all metrics; NaN treated as missing.
    df_result = df_bitcoin.select('Date', 'Price', *metrics_cols).with_columns(
        Confidence=pl.mean_horizontal([pl.col(c).fill_nan(None) for c in metrics_cols])
    )
    print('Generating charts…')
    plt.savefig(charts_file_path)
    plt.close(fig)
    _write_columns_orient_json(df_result, json_file_path, precision=4)
    last = df_result.tail(1).row(0, named=True)
    confidence_details = {
        desc: last[name]
        for name, desc in zip(metrics_cols, metrics_descriptions, strict=True)
    }
    print('\n' + ef.b + ':: Confidence we are at the peak ::' + rs.all)
    print(
        fg.cyan
        + ef.bold
        + figlet_format(format_percentage(last['Confidence'], ''), font='univers')
        + rs.all,
        end='',
    )
    for description, value in confidence_details.items():
        if value is not None and not (isinstance(value, float) and np.isnan(value)):
            print(
                fg.white + get_color(value) + f'{format_percentage(value)} ' + rs.all,
                end='',
            )
            print(f' - {description}')
    print()
    print(
        'Source code: ' + ef.u + fg.li_blue + 'https://github.com/Zaczero/CBBI' + rs.all
    )
    print('License: ' + ef.b + 'AGPL-3.0' + rs.all)
    print()
def run_and_retry(
    json_file: str = 'latest.json',
    charts_file: str = 'charts.svg',
    output_dir: str | None = 'output',
    max_attempts: int = 3,
    sleep_seconds_on_error: int = 60,
):
    """
    Calculates the current CBBI confidence value alongside all the required metrics.
    Everything gets pretty printed to the current standard output and a clean copy
    is saved to a JSON file specified by the path in the ``json_file`` argument.
    A charts image is generated on the path specified by the ``charts_file`` argument
    which summarizes all individual metrics' historical data in a visual way.
    The execution is attempted multiple times in case an error occurs.

    Args:
        json_file: File path where the output is saved in the JSON format.
        charts_file: File path where the charts are saved (format inferred from file extension).
        output_dir: Directory path where the output is stored.
            If set to ``None`` then use the current working directory.
            If the directory does not exist, it will be created.
        max_attempts: Maximum number of attempts before termination. An attempt is counted when an error occurs.
        sleep_seconds_on_error: Duration of the sleep in seconds before attempting again after an error occurs.
    """
    assert max_attempts > 0, 'Value of the max_attempts argument must be positive'
    assert sleep_seconds_on_error >= 0, (
        'Value of the sleep_seconds_on_error argument must be non-negative'
    )
    for attempt in range(1, max_attempts + 1):
        try:
            asyncio.run(run(json_file, charts_file, output_dir))
            exit(0)
        except Exception:
            print(fg.black + bg.yellow + ' An error has occurred! ' + rs.all)
            traceback.print_exc()
            # Fix: only sleep when another attempt will follow — previously
            # the process slept a full interval even after the final failure.
            if attempt < max_attempts:
                print(f'\nRetrying in {sleep_seconds_on_error} seconds…', flush=True)
                for _ in tqdm(range(sleep_seconds_on_error)):
                    time.sleep(1)
    print(f'Max attempts limit has been reached ({max_attempts}).')
    print('Better luck next time!')
    exit(-1)
# CLI entry point: expose run_and_retry's keyword arguments as flags via Fire.
if __name__ == '__main__':
    fire.Fire(run_and_retry)
================================================
FILE: metrics/_common.py
================================================
import numpy as np
import polars as pl
def join_left_on_date(df: pl.DataFrame, other: pl.DataFrame):
    """Left-join ``other`` onto ``df`` by the ``Date`` column, preserving
    ``df``'s row order."""
    return df.join(other, on='Date', how='left', maintain_order='left')
def linreg_predict(
    x_train: np.ndarray, y_train: np.ndarray, x_all: np.ndarray
) -> np.ndarray:
    """Fit an ordinary least-squares line to ``(x_train, y_train)`` and
    evaluate it at every point of ``x_all``."""
    xs = x_train.astype(np.float64)
    ys = y_train.astype(np.float64)
    dx = xs - xs.mean()
    dy = ys - ys.mean()
    # Closed-form simple regression: slope = cov(x, y) / var(x).
    slope = (dx * dy).sum() / (dx * dx).sum()
    intercept = ys.mean() - slope * xs.mean()
    return intercept + slope * x_all.astype(np.float64)
================================================
FILE: metrics/base_metric.py
================================================
import traceback
from abc import ABC, abstractmethod
import polars as pl
from matplotlib.axes import Axes
from sty import bg, fg, rs
from api.cbbiinfo_api import cbbi_fetch
from utils import send_error_notification
class BaseMetric(ABC):
    """Base class for CBBI metrics.

    Subclasses implement ``_calculate``; ``calculate`` wraps it and falls back
    to precomputed values from CBBI.info when the primary data source fails.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier, also used as the output column name."""
        pass

    @property
    @abstractmethod
    def description(self) -> str:
        """Human-readable metric title shown in charts and console output."""
        pass

    @abstractmethod
    def _calculate(self, df: pl.DataFrame, ax: list[Axes]) -> pl.Series:
        """Compute the metric series from ``df`` and plot onto ``ax``."""
        pass

    def _fallback(self, df: pl.DataFrame):
        # Pull the precomputed series from CBBI.info, align it to df's dates,
        # and forward-fill gaps so every row has a value.
        return (
            df
            .join(cbbi_fetch(self.name), on='Date', how='left', maintain_order='left')
            .with_columns(pl.col('Value').forward_fill())
            .get_column('Value')
        )

    async def calculate(self, df: pl.DataFrame, ax: list[Axes]):
        """Run ``_calculate``; on any error, notify and return fallback values."""
        try:
            return self._calculate(df, ax)
        except Exception as ex:
            traceback.print_exc()
            await send_error_notification(ex)
            print(
                fg.black
                + bg.yellow
                + f' Requesting fallback values for {self.name} (from CBBI.info) '
                + rs.all
            )
            return self._fallback(df)
================================================
FILE: metrics/mvrv_z_score.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from api.coinsoto_api import cs_fetch
from metrics._common import join_left_on_date, linreg_predict
from metrics.base_metric import BaseMetric
class MVRVMetric(BaseMetric):
    """MVRV Z-Score normalized into a 0..1 peak-confidence index."""

    @property
    def name(self):
        return 'MVRV'

    @property
    def description(self):
        return 'MVRV Z-Score'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        shift_days = 6
        low_adjust = 0.26
        remote = cs_fetch(
            path='chain/index/charts?type=/charts/mvrv-zscore/',
            data_selector='value4',
            col_name='MVRV',
        )
        df = join_left_on_date(df, remote)
        # In bull phases (halving more recent than the last price low) the
        # raw series is shifted back by a few days.
        is_bull = pl.col('DaysSinceHalving') < pl.col('DaysSincePriceLow')
        df = df.with_columns(
            MVRV=(
                pl.when(is_bull)
                .then(pl.col('MVRV').shift(shift_days))
                .otherwise(pl.col('MVRV'))
            )
        )
        df = df.with_columns(MVRV=(pl.col('MVRV').forward_fill() + 1).log())
        mvrv = df.get_column('MVRV').to_numpy()
        positions = np.arange(df.height)
        high_pos = positions[df.get_column('PriceHigh').to_numpy()]
        low_pos = positions[df.get_column('PriceLow').to_numpy()]
        # Regression lines through the cycle highs and (adjusted) lows.
        high_model = linreg_predict(high_pos, mvrv[high_pos], positions)
        low_model = linreg_predict(low_pos, mvrv[low_pos], positions) + low_adjust
        mvrv_index = (mvrv - low_model) / (high_model - low_model)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('MVRVIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(mvrv_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('MVRV')
        sns.lineplot(x=dates, y=mvrv, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=low_model, ax=ax[1])
        return pl.Series(mvrv_index)
================================================
FILE: metrics/pi_cycle.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from metrics._common import linreg_predict
from metrics.base_metric import BaseMetric
from utils import mark_highs_lows
class PiCycleMetric(BaseMetric):
    """Pi Cycle Top Indicator: distance of the 111DMA below the doubled
    350DMA (in log space), normalized into a 0..1 index."""

    @property
    def name(self):
        return 'PiCycle'

    @property
    def description(self):
        return 'Pi Cycle Top Indicator'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        dma_111 = pl.col('Price').rolling_mean(window_size=111, min_samples=111)
        dma_350x2 = pl.col('Price').rolling_mean(window_size=350, min_samples=350) * 2
        # PiCycleDiff > 0 means the 111DMA has crossed above 2x 350DMA — the
        # classic Pi Cycle top signal.
        df = df.with_columns(
            dma_111.alias('111DMA'),
            dma_350x2.alias('350DMAx2'),
        ).with_columns(PiCycleDiff=pl.col('111DMA').log() - pl.col('350DMAx2').log())
        diff = df.get_column('PiCycleDiff').to_numpy()
        df = mark_highs_lows(df, 'PiCycleDiff', True, 365 * 2, 365)
        # Partition the timeline into contiguous crossed / uncrossed runs.
        crossed = diff > 0
        crossed_idx = np.flatnonzero(crossed)
        uncrossed_idx = np.flatnonzero(~crossed)
        crossed_segments = (
            np.split(crossed_idx, np.where(np.diff(crossed_idx) > 1)[0] + 1)
            if crossed_idx.size
            else []
        )
        uncrossed_segments = (
            np.split(uncrossed_idx, np.where(np.diff(uncrossed_idx) > 1)[0] + 1)
            if uncrossed_idx.size
            else []
        )
        # After a cross, keep the index pinned at peak level (a "floor" on the
        # distance) until the signal decisively resets.
        distance_floor = np.zeros(df.height, dtype=np.float64)
        distance_from_zero = np.abs(diff)
        for i, crossed_seg in enumerate(crossed_segments):
            seg_distance = distance_from_zero[crossed_seg]
            max_pos = int(np.nanargmax(seg_distance))
            max_idx = int(crossed_seg[max_pos])
            max_distance = float(seg_distance[max_pos])
            # Hold the floor from just after the segment's peak onward…
            distance_floor[max_idx + 1 :] = max_distance
            uncrossed_seg = (
                uncrossed_segments[i + 1] if i + 1 < len(uncrossed_segments) else None
            )
            # …until the following uncrossed run moves at least as far from
            # zero on the other side (a genuine reset)…
            if uncrossed_seg is not None and uncrossed_seg.size:
                above = uncrossed_seg[distance_from_zero[uncrossed_seg] >= max_distance]
                if above.size:
                    distance_floor[int(above.min()) :] = 0
            crossed_next = (
                crossed_segments[i + 1] if i + 1 < len(crossed_segments) else None
            )
            # …or the next crossed segment begins (new cycle of the signal).
            if crossed_next is not None and crossed_next.size:
                distance_floor[int(crossed_next.min()) :] = 0
        high_idx = np.flatnonzero(df.get_column('PiCycleDiffHigh').to_numpy())
        row_nr = np.arange(df.height)
        low_idx = np.flatnonzero(df.get_column('PiCycleDiffLow').to_numpy())
        # Target: regression through diff highs, capped at 0 (the cross level);
        # requires at least 3 highs for a meaningful fit.
        target = np.zeros(df.height, dtype=np.float64)
        if high_idx.size >= 3:
            target = np.minimum(
                linreg_predict(high_idx, diff[high_idx], row_nr),
                0.0,
            )
        # Cold model: per-cycle baseline, held constant at the diff value of
        # each cycle's start (first finite point, then each marked low).
        finite_idx = np.flatnonzero(np.isfinite(diff))
        cold_model = np.full(df.height, np.nan, dtype=np.float64)
        cycle_starts = np.array([int(finite_idx[0]), *low_idx], dtype=np.int64)
        for i, start in enumerate(cycle_starts):
            end = (
                int(cycle_starts[i + 1] - 1)
                if i + 1 < cycle_starts.size
                else df.height - 1
            )
            cold_model[start : end + 1] = diff[start]
        # Index: 1 at/after a cross (distance 0), scaled down toward the cold
        # baseline as diff sits further below the target.
        distance = np.maximum(np.maximum(target - diff, 0.0), distance_floor)
        index = 1 - distance / np.abs(target - cold_model)
        x = df.get_column('Date').to_numpy()
        y_out = np.nan_to_num(index, nan=0.0)
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('PiCycleIndex')
        sns.lineplot(x=x, y=y_out, ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('PiCycleDiff')
        sns.lineplot(x=x, y=diff, ax=ax[1])
        sns.lineplot(x=x, y=target, ax=ax[1])
        sns.lineplot(x=x, y=cold_model, ax=ax[1])
        sns.lineplot(x=x, y=target - distance_floor, ax=ax[1], linestyle='--')
        return pl.Series('PiCycleIndex', index)
================================================
FILE: metrics/puell_multiple.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from metrics._common import linreg_predict
from metrics.base_metric import BaseMetric
class PuellMetric(BaseMetric):
    """Puell Multiple normalized against a regression through price peaks."""

    @property
    def name(self):
        return 'Puell'

    @property
    def description(self):
        return 'Puell Multiple'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        # 3-day smoothed natural log of the Puell Multiple.
        puell_log = (
            df.get_column('PuellMultiple')
            .forward_fill()
            .log()
            .rolling_mean(window_size=3, min_samples=1)
            .to_numpy()
        )
        positions = np.arange(df.height)
        peak_pos = positions[df.get_column('PriceHigh').to_numpy()]
        high_model = linreg_predict(peak_pos, puell_log[peak_pos], positions)
        low_model = -1.0  # constant floor in log space
        puell_index = (puell_log - low_model) / (high_model - low_model)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('PuellIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(puell_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('PuellLog')
        sns.lineplot(x=dates, y=puell_log, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=np.full(df.height, low_model, dtype=np.float64), ax=ax[1])
        return pl.Series(puell_index)
================================================
FILE: metrics/reserve_risk.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from api.coinsoto_api import cs_fetch
from metrics._common import join_left_on_date, linreg_predict
from metrics.base_metric import BaseMetric
class ReserveRiskMetric(BaseMetric):
    """Reserve Risk normalized into a 0..1 peak-confidence index."""

    @property
    def name(self):
        return 'ReserveRisk'

    @property
    def description(self):
        return 'Reserve Risk'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        lag_days = 1
        remote = cs_fetch(
            path='chain/index/charts?type=/charts/reserve-risk/',
            data_selector='value4',
            col_name='Risk',
        )
        df = join_left_on_date(df, remote)
        # Lag the remote series by one day, then fill gaps forward.
        df = df.with_columns(Risk=pl.col('Risk').shift(lag_days).forward_fill())
        risk_log = np.log(df.get_column('Risk').to_numpy())
        positions = np.arange(df.height)
        high_pos = positions[df.get_column('PriceHigh').to_numpy()]
        # The first marked low is excluded from the regression fit.
        low_pos = positions[df.get_column('PriceLow').to_numpy()][1:]
        high_model = linreg_predict(high_pos, risk_log[high_pos], positions) - 0.15
        low_model = linreg_predict(low_pos, risk_log[low_pos], positions)
        risk_index = (risk_log - low_model) / (high_model - low_model)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('RiskIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(risk_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('RiskLog')
        sns.lineplot(x=dates, y=risk_log, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=low_model, ax=ax[1])
        return pl.Series(risk_index)
================================================
FILE: metrics/rhodl_ratio.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from api.coinsoto_api import cs_fetch
from metrics._common import join_left_on_date, linreg_predict
from metrics.base_metric import BaseMetric
class RHODLMetric(BaseMetric):
    """RHODL Ratio normalized into a 0..1 peak-confidence index."""

    @property
    def name(self):
        return 'RHODL'

    @property
    def description(self):
        return 'RHODL Ratio'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        remote_df = cs_fetch(
            path='chain/index/charts?type=/charts/rhodl-ratio/',
            data_selector='value1',
            col_name='RHODL',
        )
        df = join_left_on_date(df, remote_df).with_columns(
            RHODL=pl.col('RHODL').forward_fill()
        )
        positions = np.arange(df.height)
        # 2024-12-18 is treated as an additional cycle top for the fit.
        extra_top = df.get_column('Date').to_numpy() == np.datetime64('2024-12-18')
        high_pos = positions[df.get_column('PriceHigh').to_numpy() | extra_top]
        # The first marked low is excluded from the regression fit.
        low_pos = positions[df.get_column('PriceLow').to_numpy()][1:]
        rhodl_log = np.log(df.get_column('RHODL').to_numpy())
        high_model = linreg_predict(high_pos, rhodl_log[high_pos], positions)
        low_model = linreg_predict(low_pos, rhodl_log[low_pos], positions)
        rhodl_index = (rhodl_log - low_model) / (high_model - low_model)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('RHODLIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(rhodl_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('RHODLLog')
        sns.lineplot(x=dates, y=rhodl_log, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=low_model, ax=ax[1])
        return pl.Series(rhodl_index)
================================================
FILE: metrics/rupl.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from api.coinsoto_api import cs_fetch
from metrics._common import join_left_on_date, linreg_predict
from metrics.base_metric import BaseMetric
class RUPLMetric(BaseMetric):
    """Relative Unrealized Profit/Loss normalized into a 0..1 index."""

    @property
    def name(self):
        return 'RUPL'

    @property
    def description(self):
        return 'RUPL/NUPL Chart'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        remote = cs_fetch(
            path='chain/index/charts?type=/charts/relative-unrealized-prof/',
            data_selector='value1',
            col_name='RUPL',
        )
        df = join_left_on_date(df, remote).with_columns(
            RUPL=pl.col('RUPL').forward_fill()
        )
        rupl = df.get_column('RUPL').to_numpy()
        positions = np.arange(df.height)
        high_pos = positions[df.get_column('PriceHigh').to_numpy()]
        # The first marked low is excluded from the regression fit.
        low_pos = positions[df.get_column('PriceLow').to_numpy()][1:]
        high_model = linreg_predict(high_pos, rupl[high_pos], positions)
        low_model = linreg_predict(low_pos, rupl[low_pos], positions)
        rupl_index = (rupl - low_model) / (high_model - low_model)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('RUPLIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(rupl_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('RUPL')
        sns.lineplot(x=dates, y=rupl, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=low_model, ax=ax[1])
        return pl.Series(rupl_index)
================================================
FILE: metrics/trolololo.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from metrics._common import linreg_predict
from metrics.base_metric import BaseMetric
class TrolololoMetric(BaseMetric):
    """Bitcoin 'Trolololo' logarithmic trend line, normalized to 0..1."""

    @property
    def name(self):
        return 'Trolololo'

    @property
    def description(self):
        return 'Bitcoin Trolololo Trend Line'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        dates = df.get_column('Date').to_numpy()
        price_log = df.get_column('PriceLog').to_numpy()
        epoch = np.datetime64('2012-01-01T00:00:00')
        days = (dates - epoch) / np.timedelta64(1, 'D')
        # Published base-10 trend-line formulas, converted to natural log.
        ln10 = np.log(10.0)
        trolo_top_log = ln10 * (2.900 * np.log(days + 1400) - 19.463)
        trolo_bottom_log = ln10 * (2.788 * np.log(days + 1200) - 19.463)
        overshoot_actual = price_log - trolo_top_log
        undershoot_actual = price_log - trolo_bottom_log
        positions = np.arange(df.height)
        in_range = days >= 0
        high_pos = positions[df.get_column('PriceHigh').to_numpy() & in_range]
        low_pos = positions[df.get_column('PriceLow').to_numpy() & in_range]
        high_y = overshoot_actual[high_pos].copy()
        high_y[0] *= 0.6  # the first value seems too high
        overshoot_model = linreg_predict(high_pos, high_y, positions)
        undershoot_model = linreg_predict(low_pos, undershoot_actual[low_pos], positions)
        high_model = trolo_top_log + overshoot_model
        low_model = trolo_bottom_log + undershoot_model
        trolo_index = (price_log - low_model) / (high_model - low_model)
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('TroloIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(trolo_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('PriceLog')
        sns.lineplot(x=dates, y=price_log, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=low_model, ax=ax[1])
        return pl.Series(trolo_index)
================================================
FILE: metrics/two_year_moving_average.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from metrics._common import linreg_predict
from metrics.base_metric import BaseMetric
class TwoYearMovingAverageMetric(BaseMetric):
    """Price vs its 2-year moving average, normalized into a 0..1 index."""

    @property
    def name(self):
        return '2YMA'

    @property
    def description(self):
        return '2 Year Moving Average'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        positions = np.arange(df.height)
        high_pos = positions[df.get_column('PriceHigh').to_numpy()]
        low_pos = positions[df.get_column('PriceLow').to_numpy()]
        price_log = df.get_column('PriceLog').to_numpy()
        two_yma_log = np.log(df.get_column('Price730DMA').to_numpy())
        log_diff = price_log - two_yma_log
        # Regress the log-space over/undershoot at cycle highs/lows, then
        # center the bands back onto the 2YMA curve.
        overshoot_model = linreg_predict(high_pos, log_diff[high_pos], positions)
        undershoot_model = linreg_predict(low_pos, log_diff[low_pos], positions)
        high_band = overshoot_model + two_yma_log
        low_band = undershoot_model + two_yma_log
        two_yma_index = (price_log - low_band) / (high_band - low_band)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('2YMAIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(two_yma_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('PriceLog')
        sns.lineplot(x=dates, y=price_log, ax=ax[1])
        sns.lineplot(x=dates, y=high_band, ax=ax[1])
        sns.lineplot(x=dates, y=low_band, ax=ax[1])
        return pl.Series(two_yma_index)
================================================
FILE: metrics/woobull_topcap_cvdd.py
================================================
import numpy as np
import polars as pl
import seaborn as sns
from matplotlib.axes import Axes
from metrics._common import join_left_on_date, linreg_predict
from metrics.base_metric import BaseMetric
from utils import HTTP
def _woocharts_xy_ms_df(*, x: list[object], y: list[object], y_name: str):
    """Build a ``(Date, y_name)`` DataFrame from woocharts x/y arrays,
    where ``x`` holds millisecond epoch timestamps."""
    frame = pl.DataFrame({
        'Date': pl.Series(x, dtype=pl.Int64),
        y_name: pl.Series(y, dtype=pl.Float64),
    })
    date_expr = (
        pl.from_epoch(pl.col('Date'), time_unit='ms')
        .dt.cast_time_unit('us')
        .dt.replace_time_zone('UTC')
    )
    return frame.with_columns(Date=date_expr).select('Date', y_name)
def _fetch_df():
    """Fetch the Woobull Top Cap and CVDD model series and inner-join
    them on Date (keeping the Top series' row order)."""
    response = HTTP.get('https://woocharts.com/bitcoin-price-models/data/chart.json')
    response.raise_for_status()
    payload = response.json()
    df_top = _woocharts_xy_ms_df(
        x=payload['top_']['x'],
        y=payload['top_']['y'],
        y_name='Top',
    )
    df_cvdd = _woocharts_xy_ms_df(
        x=payload['cvdd']['x'],
        y=payload['cvdd']['y'],
        y_name='CVDD',
    )
    return df_top.join(df_cvdd, on='Date', how='inner', maintain_order='left')
class WoobullMetric(BaseMetric):
    """Price position between CVDD and Top Cap, normalized to 0..1."""

    @property
    def name(self):
        return 'Woobull'

    @property
    def description(self):
        return 'Woobull Top Cap vs CVDD'

    def _calculate(self, df: pl.DataFrame, ax: list[Axes]):
        df = join_left_on_date(df, _fetch_df())
        positions = np.arange(df.height)
        high_pos = positions[df.get_column('PriceHigh').to_numpy()]
        # The first marked low is excluded from the regression fit.
        low_pos = positions[df.get_column('PriceLow').to_numpy()][1:]
        top_log = np.log(df.get_column('Top').to_numpy())
        cvdd_log = np.log(df.get_column('CVDD').to_numpy())
        price_log = df.get_column('PriceLog').to_numpy()
        # Where price sits between CVDD (floor) and Top Cap (ceiling), log space.
        woobull = (price_log - cvdd_log) / (top_log - cvdd_log)
        high_model = linreg_predict(high_pos, woobull[high_pos], positions) - 0.025
        low_model = linreg_predict(low_pos, woobull[low_pos], positions)
        woobull_index = (woobull - low_model) / (high_model - low_model)
        dates = df.get_column('Date').to_numpy()
        ax[0].set_title(self.description)
        ax[0].set_xlabel('Date')
        ax[0].set_ylabel('WoobullIndex')
        sns.lineplot(x=dates, y=np.nan_to_num(woobull_index, nan=0.0), ax=ax[0])
        ax[1].set_xlabel('Date')
        ax[1].set_ylabel('Woobull')
        sns.lineplot(x=dates, y=woobull, ax=ax[1])
        sns.lineplot(x=dates, y=high_model, ax=ax[1])
        sns.lineplot(x=dates, y=low_model, ax=ax[1])
        return pl.Series(woobull_index)
================================================
FILE: pyproject.toml
================================================
[project]
name = "cbbi"
version = "0.0.0"
requires-python = "~=3.14.0"
dependencies = [
"fire",
"httpx[brotli,zstd]",
"matplotlib",
"numpy",
"polars",
"pyfiglet",
"python-telegram-bot",
"seaborn",
"sty",
"tqdm",
]
[tool.ruff]
indent-width = 4
line-length = 88
target-version = "py314"
[tool.ruff.lint]
ignore = [
"S101", # assert
]
# see https://docs.astral.sh/ruff/rules/ for rules documentation
select = [
"A", # flake8-builtins
"ARG", # flake8-unused-arguments
"ASYNC", # flake8-async
"B", # flake8-bugbear
# "COM", # flake8-commas
"C4", # flake8-comprehensions
"E4", # pycodestyle
"E7",
"E9",
"F", # pyflakes
# "FBT", # flake8-boolean-trap
"FLY", # flynt
# "FURB", # refurb (preview)
"G", # flake8-logging-format
"I", # isort
"INT", # flake8-gettext
# "LOG", # flake8-logging (preview)
"N", # pep8-naming
"NPY", # numpy
"PERF", # perflint
"PGH", # pygrep-hooks
"PIE", # flake8-pie
"Q", # flake8-quotes
"UP", # pyupgrade
# "PL", # pylint
"PT", # flake8-pytest-style
"PTH", # flake8-use-pathlib
"PYI", # flake8-pyi
"RSE", # flake8-raise
"RUF", # ruff
"S", # flake8-bandit
"SIM", # flake8-simplify
"SLF", # flake8-self
"SLOT", # flake8-slots
"T10", # flake8-debugger
# "T20", # flake8-print
# "TRY", # tryceratops
"YTT", # flake8-2020
]
fixable = ["ALL"]
unfixable = []
[tool.ruff.lint.flake8-builtins]
builtins-ignorelist = ["id", "open", "type"]
[tool.ruff.lint.flake8-quotes]
docstring-quotes = "double"
inline-quotes = "single"
multiline-quotes = "double"
[tool.ruff.lint.pylint]
max-args = 10
[tool.ruff.format]
indent-style = "space"
line-ending = "lf"
quote-style = "single"
skip-magic-trailing-comma = false
preview = true
[tool.setuptools]
packages = ["."]
[tool.uv]
package = false
python-downloads = "never"
python-preference = "only-system"
================================================
FILE: shell.nix
================================================
{ }:
let
  # Update with `nixpkgs-update` command
  pkgs =
    import
      (fetchTarball "https://github.com/NixOS/nixpkgs/archive/68a8af93ff4297686cb68880845e61e5e2e41d92.tar.gz")
      { };

  # Native libraries the wrapped Python interpreter must find at runtime.
  pythonLibs = with pkgs; [
    stdenv.cc.cc.lib
    zlib.out
  ];

  # Python 3.14 wrapped so its LD_LIBRARY_PATH includes pythonLibs.
  python' =
    with pkgs;
    (symlinkJoin {
      name = "python";
      paths = [ python314 ];
      buildInputs = [ makeWrapper ];
      postBuild = ''
        wrapProgram "$out/bin/python3.14" --prefix LD_LIBRARY_PATH : "${lib.makeLibraryPath pythonLibs}"
      '';
    });

  # Tools available inside the development shell.
  packages' = with pkgs; [
    python'
    coreutils
    curl
    gnused
    jq
    ruff
    uv
    # Re-pins nixpkgs above to the latest nixpkgs-unstable channel revision.
    (writeShellScriptBin "nixpkgs-update" ''
      set -e
      hash=$(
        curl -fsSL \
          https://prometheus.nixos.org/api/v1/query \
          -d 'query=channel_revision{channel="nixpkgs-unstable"}' |
          jq -r ".data.result[0].metric.revision"
      )
      sed -i "s|nixpkgs/archive/[0-9a-f]\\{40\\}|nixpkgs/archive/$hash|" shell.nix
      echo "Nixpkgs updated to $hash"
    '')
    # Builds the container image via nix-build, loads it, and pushes the tag.
    # Uses podman transparently when docker is not installed.
    (writeShellScriptBin "docker-build-push" ''
      set -e
      if command -v podman &> /dev/null; then docker() { podman "$@"; } fi
      docker push $(docker load < $(nix-build --no-out-link) | sed -En 's/Loaded image: (\S+)/\1/p')
    '')
  ];

  # Shell hook: environment setup, optional .env loading, venv refresh, uv sync.
  # The venv is recreated whenever the pinned interpreter path changes.
  shell' = ''
    export SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt
    export PYTHONNOUSERSITE=1
    export PYTHONPATH=""
    export TZ=UTC
    if [ -f .env ]; then
      echo "Loading .env file"
      set -a; . .env; set +a
    else
      echo "Skipped loading .env file (not found)"
    fi
    current_python=$(readlink -e .venv/bin/python || echo "")
    current_python=''${current_python%/bin/*}
    [ "$current_python" != "${python'}" ] && rm -rf .venv/
    echo "Installing Python dependencies"
    export UV_PYTHON="${python'}/bin/python"
    uv sync --frozen
    source .venv/bin/activate
    export UV_PYTHON="$VIRTUAL_ENV/bin/python"
  '';
in
pkgs.mkShell {
  buildInputs = packages';
  shellHook = shell';
}
================================================
FILE: utils.py
================================================
import os
import traceback
from math import ceil
import numpy as np
import polars as pl
import telegram
from httpx import Client
from sty import bg
# Shared HTTP client for all API fetchers: browser-like User-Agent,
# 60-second timeout, redirects followed automatically.
HTTP = Client(
    headers={
        'User-Agent': 'Mozilla/5.0 (Linux x86_64; rv:140.0) Gecko/20100101 Firefox/140.0'
    },
    timeout=60,
    follow_redirects=True,
)
def mark_highs_lows(
    df: pl.DataFrame,
    col: str,
    begin_with_high: bool,
    window_size: int,
    ignore_last_rows: int,
) -> pl.DataFrame:
    """
    Marks highs and lows (peaks) of the column values inside the given DataFrame.

    Marked points are indicated by `True` inside their corresponding, newly added,
    `col + "High"` and `col + "Low"` columns.

    Args:
        df: DataFrame from which the column values are selected and to which marked points columns are added.
        col: Column name of which values are selected inside the given DataFrame.
        begin_with_high: Indicates whether the first peak is high or low.
        window_size: Window size for the algorithm to consider.
            Too low value will mark too many peaks, whereas, too high value will mark too little peaks.
        ignore_last_rows: Amount of trailing DataFrame rows for which highs and lows should not be marked.

    Returns:
        Modified input DataFrame with columns, indicating the marked points, added.
    """
    col_high = col + 'High'
    col_low = col + 'Low'
    values = df.get_column(col).to_numpy()
    high_marks = np.zeros(len(values), dtype=np.bool_)
    low_marks = np.zeros(len(values), dtype=np.bool_)
    # Alternates between searching for a high and a low, starting as requested.
    searching_high = begin_with_high
    current_index = 0
    while True:
        # Window covers up to window_size rows past current_index (end-clamped).
        window_end = min(current_index + window_size, len(values) - 1)
        window = values[current_index : window_end + 1]
        # Skip past windows that contain no usable (non-NaN) values.
        if sum(~np.isnan(window)) == 0 and window.shape[0] > 1:
            current_index += window.shape[0]
            continue
        # A single remaining row cannot anchor a peak: done scanning.
        if window.shape[0] <= 1:
            break
        # Absolute position of the window's extremum (NaN-safe argmax/argmin).
        window_index = current_index + (
            np.nanargmax(window) if searching_high else np.nanargmin(window)
        )
        # Mark only when the extremum sits at the window start: no later value
        # in the lookahead window beats it, so it is confirmed as a peak and
        # the search flips to the opposite kind.
        if window_index == current_index:
            if searching_high:
                high_marks[window_index] = True
            else:
                low_marks[window_index] = True
            searching_high = not searching_high
            window_index = window_index + 1
        # Resume scanning at the extremum (or one past a confirmed peak).
        current_index = window_index
    if ignore_last_rows > 0:
        high_marks[-ignore_last_rows:] = False
        low_marks[-ignore_last_rows:] = False
    # stabilize the algorithm until a next major update
    stabilize_mask = df.get_column('Date').to_numpy() >= np.datetime64('2025-10-07')
    high_marks[stabilize_mask] = False
    low_marks[stabilize_mask] = False
    return df.with_columns(
        pl.Series(col_high, high_marks, dtype=pl.Boolean),
        pl.Series(col_low, low_marks, dtype=pl.Boolean),
    )
def mark_days_since(df: pl.DataFrame, cols: list[str]):
    """
    Add a 'DaysSince<col>' column for every boolean marker column in `cols`.

    Each new column holds the number of rows elapsed since the most recent
    `True` value in the corresponding marker column (null before the first
    `True`). With daily rows this equals the number of days.

    Args:
        df: The input DataFrame.
        cols: Boolean marker columns to measure the distance from.

    Returns:
        The DataFrame with the new 'DaysSince*' columns added.
    """
    indexed = df.with_row_index(name='_row_nr')
    distance_exprs = [
        (
            pl.col('_row_nr')
            - pl.when(pl.col(marker))
            .then(pl.col('_row_nr'))
            .otherwise(None)
            .forward_fill()  # carry the last event's row index forward
        ).alias(f'DaysSince{marker}')
        for marker in cols
    ]
    return indexed.with_columns(distance_exprs).drop('_row_nr')
def format_percentage(val: float, suffix: str = ' %'):
    """
    Formats a percentage value (0.0 - 1.0) in the standardized way.

    Returned value has a constant width and a trailing '%' sign.

    Args:
        val: Percentage value to be formatted.
        suffix: String to be appended to the result.

    Returns:
        Formatted percentage value with a constant width and trailing '%' sign.

    Examples:
        >>> format_percentage(0.359)
        ' 36 %'
        >>> format_percentage(1.1)
        '110 %'
    """
    # Round before ceil to neutralize binary floating-point error:
    # 1.1 * 100 == 110.00000000000001, which would otherwise ceil to 111
    # and contradict the documented '110 %' output.
    return f'{ceil(round(val * 100, 9)): >3d}{suffix}'
def get_color(val: float):
    """
    Maps a percentage value (0.0 - 1.0) to its corresponding color.

    The color is used to indicate whether the value is low (0.0) or high (1.0).
    Returned value is a valid sty-package color string.

    Args:
        val: Percentage value to be mapped into a color.

    Returns:
        Valid sty-package color string.
    """
    # Bucket boundaries and the color assigned to each resulting bucket
    # (below 0.3 -> red, ..., 0.97 and above -> magenta).
    thresholds = (0.3, 0.65, 0.85, 0.97)
    palette = (bg.da_red, bg.da_yellow, bg.da_green, bg.da_cyan, bg.da_magenta)
    # Round to 2 decimals first so values right at a boundary bucket predictably.
    bucket = np.digitize([round(val, 2)], thresholds)[0]
    return palette[bucket]
async def send_error_notification(exception: Exception):
    """
    Send a notification to a Telegram chat with details of the provided exception.

    Credentials are read from the TELEGRAM_TOKEN and TELEGRAM_CHAT_ID
    environment variables; when either is missing, nothing is sent.

    Args:
        exception: The exception to be reported.

    Returns:
        A boolean indicating whether the notification was sent successfully.
    """
    token = os.getenv('TELEGRAM_TOKEN')
    chat_id = os.getenv('TELEGRAM_CHAT_ID')
    if not token or not chat_id:
        # Telegram reporting is not configured — skip silently.
        return False

    stack_trace = ''.join(traceback.format_exception(exception))
    message = (
        f'🚨 An error has occurred: {exception!s}\n'
        f'\n'
        f'🔍️ Stack trace\n'
        f'{stack_trace}'
    )
    async with telegram.Bot(token) as bot:
        await bot.send_message(chat_id, message, parse_mode='HTML')
    return True