NiLang.jl (逆lang) is a reversible domain-specific language (DSL) that allows a program to go back to the past.
* Requires Julia version >= 1.3,
NiLang features:
* any program written in NiLang is differentiable,
* a reversible language with abstraction and arrays,
* complex values
* reversible logarithmic number system

[](https://codecov.io/gh/GiggleLiu/NiLang.jl)
The main docs can be found here:
[](https://giggleliu.github.io/NiLang.jl/stable/)
[](https://giggleliu.github.io/NiLang.jl/dev/)
There are also some Pluto-based notebooks:
* [tutorial](https://giggleliu.github.io/NiLang.jl/dev/notebooks/basic.html)
* [documentation](https://giggleliu.github.io/NiLang.jl/dev/notebooks/documentation.html)
* [Billiard ball model cellular automata](https://giggleliu.github.io/NiLang.jl/dev/notebooks/margolus.html)
> The strangeness of reversible computing is mainly due to
> our lack of experience with it.—Henry Baker, 1992
## To Start
```
pkg> add NiLang
```
## An example: Compute the norm of a vector
```julia
julia> using NiLang
julia> @i function f(res, y, x)
for i=1:length(x)
y += x[i] ^ 2
end
res += sqrt(y)
end
julia> res_out, y_out, x_out = f(0.0, 0.0, [1, 2, 3.0])
(3.7416573867739413, 14.0, [1.0, 2.0, 3.0])
julia> (~f)(res_out, y_out, x_out) # automatically generated inverse program.
(0.0, 0.0, [1.0, 2.0, 3.0])
julia> ∂res, ∂y, ∂x = NiLang.AD.gradient(Val(1), f, (0.0, 0.0, [1, 2, 3.0]))
# automatic differentiation, `Val(1)` means the first argument of `f` is the loss.
(1.0, 0.1336306209562122, [0.2672612419124244, 0.5345224838248488, 0.8017837257372732])
```
The performance of reversible programming automatic differentiation is much better than most traditional frameworks. Here is why, and how it works,

## Check our [paper](https://arxiv.org/abs/2003.04617)
```bibtex
@misc{Liu2020,
title={Differentiate Everything with a Reversible Programming Language},
author={Jin-Guo Liu and Taine Zhao},
year={2020},
eprint={2003.04617},
archivePrefix={arXiv},
primaryClass={cs.PL}
}
```
================================================
FILE: benchmark/besselj_gpu.jl
================================================
using NiLang, NiLang.AD
using CuArrays, CUDAnative, GPUArrays
using BenchmarkTools
# Reverse-AD rule for `out! -= CUDAnative.pow(x, n)` when all three arguments
# carry gradient fields (GVar).  First performs the primal update on the value
# fields, then builds the two Jacobian entries ∂(x^n)/∂x = n*x^(n-1) and
# ∂(x^n)/∂n = x^n*log(x) in ancillas, accumulates them into the gradient
# fields, and uncomputes the ancillas with ~@routine so no garbage remains.
@i @inline function :(-=)(CUDAnative.pow)(out!::GVar{T}, x::GVar{T}, n::GVar) where T
value(out!) -= CUDAnative.pow(value(x), value(n))
# grad x
@routine @invcheckoff begin
@zeros T anc1 anc2 anc3 jac1 jac2
DEC(value(n))                               # temporarily n -> n-1
anc1 += CUDAnative.pow(value(x), value(n))  # anc1 = x^(n-1)
INC(value(n))                               # restore n
jac1 += anc1 * value(n)                     # jac1 = n*x^(n-1)
# get grad of n
anc2 += log(value(x))
anc3 += CUDAnative.pow(value(x), value(n))
jac2 += anc3*anc2                           # jac2 = x^n*log(x)
end
grad(x) += grad(out!) * jac1
grad(n) += grad(out!) * jac2
~@routine
end
# Reverse-AD rule for `out! -= CUDAnative.pow(x, n)` when only `x` carries a
# gradient and `n` is a plain number: accumulate ∂(x^n)/∂x = n*x^(n-1) into
# grad(x).  NOTE(review): `DEC(value(n))` on a non-GVar `n` relies on
# `value(n) == n` for plain numbers — confirm against NiLang's `value` docs.
@i @inline function :(-=)(CUDAnative.pow)(out!::GVar{T}, x::GVar, n) where T
value(out!) -= CUDAnative.pow(value(x), n)
@routine @invcheckoff begin
anc1 ← zero(value(x))
jac ← zero(value(x))
DEC(value(n))                        # n -> n-1
anc1 += CUDAnative.pow(value(x), n)  # anc1 = x^(n-1)
INC(value(n))                        # restore n
jac += anc1 * n                      # jac = n*x^(n-1)
end
grad(x) += grad(out!) * jac
~@routine
end
# Reverse-AD rule for `out! -= CUDAnative.pow(x, n)` when only the exponent
# `n` carries a gradient: accumulate ∂(x^n)/∂n = x^n*log(x) into grad(n).
@i @inline function :(-=)(CUDAnative.pow)(out!::GVar{T}, x, n::GVar) where T
value(out!) -= CUDAnative.pow(x, value(n))
# get jac of n
@routine @invcheckoff begin
anc1 ← zero(x)
anc2 ← zero(x)
jac ← zero(x)
anc1 += log(x)
anc2 += CUDAnative.pow(x, value(n))
jac += anc1*anc2                     # jac = log(x)*x^n
end
grad(n) += grad(out!) * jac
~@routine
end
# You need to replace all "^" operations in `ibessel` with `CUDAnative.pow`.
# Please remember to turn the invertibility check off, because error handling is not supported in a CUDA thread.
# Function `i_dirtymul` and `i_factorial` are not changed.
# Reversible Bessel function of the first kind: accumulates J_ν(z) into `out!`
# via the power series Σ_k (-1)^k (z/2)^(2k+ν) / (k! (k+ν)!).  All intermediate
# state lives in ancillas allocated inside @routine and is uncomputed by the
# trailing ~@routine, so only `out!` is modified.  `atol` stops the series.
@i function ibesselj(out!, ν, z; atol=1e-8)
@routine @invcheckoff begin
k ← 0
fact_nu ← zero(ν)
halfz ← zero(z)
halfz_power_nu ← zero(z)
halfz_power_2 ← zero(z)
out_anc ← zero(z)
anc1 ← zero(z)
anc2 ← zero(z)
anc3 ← zero(z)
anc4 ← zero(z)
anc5 ← zero(z)
halfz += z / 2
halfz_power_nu += CUDAnative.pow(halfz, ν)
halfz_power_2 += CUDAnative.pow(halfz, 2)
i_factorial(fact_nu, ν)
# anc1 holds the current series term, starting at (z/2)^ν / ν!.
anc1 += halfz_power_nu/fact_nu
out_anc += anc1
# Reversible while loop: the @from condition (k==0) must hold exactly
# before the first iteration and fail afterwards, so it can be reversed.
@from k==0 while abs(unwrap(anc1)) > atol && abs(unwrap(anc4)) < atol
INC(k)
@routine begin
anc5 += k
anc5 += ν
anc2 -= k * anc5              # anc2 = -k*(k+ν): sign-flipping denominator
anc3 += halfz_power_2 / anc2  # term ratio: -(z/2)^2 / (k*(k+ν))
end
i_dirtymul(anc1, anc3, anc4)      # anc1 *= anc3 (dirty bits go into anc4)
out_anc += anc1
~@routine
end
end
out! += out_anc
~@routine                             # uncompute every ancilla above
end
# Define your reversible kernel function that calls the reversible bessel function
# Reversible CUDA kernel: each thread computes one element of the batch.
@i function ibesselj_kernel(out!, ν, z, atol)
# Map this thread to a linear index into `out!`/`z`.
i ← (blockIdx().x-1) * blockDim().x + threadIdx().x
@inbounds ibesselj(out![i], ν, z[i]; atol=atol)
# Deallocate `i` by recomputing it; @invcheckoff suppresses the
# reversibility check, which cannot throw inside a CUDA thread.
@invcheckoff i → (blockIdx().x-1) * blockDim().x + threadIdx().x
end
# To launch this reversible kernel, you also need a reversible host function.
# Reversible host-side entry point for CuVector inputs: picks a launch
# configuration heuristically and dispatches to the kernel above.
@i function ibesselj(out!::CuVector, ν, z::CuVector; atol=1e-8)
XY ← GPUArrays.thread_blocks_heuristic(length(out!))
@cuda threads=XY.:1 blocks=XY.:2 ibesselj_kernel(out!, ν, z, atol)
# Deallocate XY by recomputation (checks disabled as in the kernel).
@invcheckoff XY → GPUArrays.thread_blocks_heuristic(length(out!))
end
# To test this function, we first define input parameters `a` and output `out!`
# Benchmark driver: forward pass, then time the reverse (gradient) pass.
N = 4096
T = Float64
a = CuArray(ones(T, N))
out! = CuArray(zeros(T, N))
# We wrap the output with a randomly initialized gradient field, suppose we get the gradients from a virtual loss function.
# Also, we need to initialize an empty gradient field for elements in input cuda tensor `a`.
# NOTE(review): the CPU version of this benchmark calls `ibesselj(out!, 2, a)`
# with the plain array on the forward pass; wrapping with `GVar.(a)` here and
# then wrapping `out!` again below looks inconsistent — confirm intended.
out! = ibesselj(out!, 2, GVar.(a))[1]
out_g! = GVar.(out!, CuArray(ones(T, N)))
a_g = GVar.(a)
# Call the inverse program, the multiple dispatch will drive you to the goal.
println("Benchmarking NiLang on CUDA, N = $N, T = $T")
# Running ~ibesselj on GVar inputs executes the backward (gradient) pass.
display(@benchmark CuArrays.@sync (~ibesselj)($out_g!, 2, $a_g))
================================================
FILE: benchmark/besselj_irreversible.jl
================================================
using Zygote
using ForwardDiff
using BenchmarkTools
"""
    besselj(ν, z; atol=1e-8)

Evaluate the Bessel function of the first kind ``J_ν(z)`` by summing its
power series until the magnitude of the current term drops below `atol`.
"""
function besselj(ν, z; atol=1e-8)
    # First series term: (z/2)^ν / ν!
    term = (z / 2)^ν / factorial(ν)
    total = term
    n = 0
    while abs(term) > atol
        n += 1
        # Multiply by the ratio of consecutive series terms.
        term *= (-1) / n / (n + ν) * (z / 2)^2
        total += term
    end
    return total
end
"""
    grad_besselj_manual(ν, z; atol=1e-8)

Derivative of `besselj` with respect to `z`, using the recurrence
``J_ν'(z) = (J_{ν-1}(z) - J_{ν+1}(z)) / 2``.
"""
function grad_besselj_manual(ν, z; atol=1e-8)
    # Bug fix: the original had `(a - besselj(ν+1, z); atol=atol)/2`, whose
    # misplaced parenthesis turned the body into a block returning `atol/2`.
    # Both series must be evaluated to the same tolerance.
    return (besselj(ν - 1, z; atol=atol) - besselj(ν + 1, z; atol=atol)) / 2
end
# Baselines: plain evaluation, a hand-written recurrence gradient, and two
# AD frameworks, all working on besselj at (ν, z) = (2, 1.0).
println("Benchmarking Julia")
display(@benchmark besselj(2, 1.0))
println("Benchmarking Manual")
display(@benchmark grad_besselj_manual(2, 1.0))
println("Benchmarking Zygote")
display(@benchmark Zygote.gradient(besselj, 2, 1.0))
println("Benchmarking ForwardDiff")
display(@benchmark ForwardDiff.derivative(x->besselj(2, x), 1.0))
================================================
FILE: benchmark/besselj_reversible.jl
================================================
using NiLang, NiLang.AD
using BenchmarkTools
# Load the reversible `ibesselj` definition from the examples directory.
# Bug fix: the path was misspelled "../exmamples/besselj.jl", which makes
# `include` throw a SystemError before any benchmark can run.
include("../examples/besselj.jl")
# To test this function, we first define input parameters `a` and output `out!`
a = 1.0
out! = 0.0
# We wrap the output with a randomly initialized gradient field, suppose we get the gradients from a virtual loss function.
# Also, we need to initialize an empty gradient field for elements in input cuda tensor `a`.
out! = ibesselj(out!, 2, a)[1]
out_g! = GVar(out!, 1.0)
a_g = GVar(a)
# Call the inverse program, the multiple dispatch will drive you to the goal.
println("Benchmarking NiLang")
display(@benchmark ibesselj($out!, 2, $a))
println("Benchmarking NiLang.AD")
display(@benchmark (~ibesselj)($out_g!, 2, $a_g))
================================================
FILE: benchmark/first_function.jl
================================================
# Measure NiLang's "time to first function": wall time to load the package
# and compile a first reversible definition.  Do not refactor the @i body —
# its compilation cost is exactly what is being measured.
t1 = time()
using NiLang
# Reversible dot-product accumulator over a fixed 10 elements:
# x += Σ y[i]' * z[i].
@i function dot(x, y, z)
for i=1:10
x += y[i]' * z[i]
end
end
t2 = time()
println("costs $(t2-t1)s")
================================================
FILE: benchmark/stack.jl
================================================
================================================
FILE: docs/Project.toml
================================================
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
ChainRules = "082447d4-558c-5d27-93f4-14fc19e9eca2"
Compose = "a81c6b42-2e10-5240-aca2-a61377ecd94b"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
FixedPointNumbers = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589"
LogarithmicNumbers = "aa2f6b4e-9042-5d33-9679-40d3a6b85899"
NiLang = "ab4ef3a6-0b42-11ea-31f6-e34652774712"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Viznet = "52a3aca4-6234-47fd-b74a-806bdf78ede9"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
================================================
FILE: docs/make.jl
================================================
# Documenter build script: converts the example scripts into markdown pages
# with Literate, builds the HTML site, and deploys it to gh-pages.
using Documenter, NiLang
using SparseArrays
using Literate
# Render each example script in ../examples into docs/src/examples/*.md.
tutorialpath = joinpath(@__DIR__, "src/examples")
sourcepath = joinpath(dirname(@__DIR__), "examples")
for jlfile in ["besselj.jl", "sparse.jl", "qr.jl", "port_zygote.jl", "port_chainrules.jl", "fib.jl", "unitary.jl", "nice.jl", "realnvp.jl", "boxmuller.jl", "lognumber.jl", "pyramid.jl"]
Literate.markdown(joinpath(sourcepath, jlfile), tutorialpath)
end
# # Pluto pages
# import Pkg
# Pkg.add([
# Pkg.PackageSpec(url="https://github.com/GiggleLiu/PlutoUtils.jl", rev="static-export"),
# Pkg.PackageSpec(url="https://github.com/fonsp/Pluto.jl", rev="05e5b68"),
# ]);
# Site structure: page titles mapped to the markdown sources under docs/src.
makedocs(;
modules=[NiLang],
format=Documenter.HTML(),
pages=[
"Home" => "index.md",
"What and Why" => "why.md",
"Tutorial" => Any[
"tutorial.md",
"examples/port_zygote.md",
"examples/port_chainrules.md"
],
"Examples" => Any[
"examples/fib.md",
"examples/pyramid.md",
"examples/besselj.md",
"examples/sparse.md",
"examples/lognumber.md",
"examples/unitary.md",
#"examples/nice.md",
#"examples/realnvp.md",
"examples/qr.md",
"examples/boxmuller.md",
],
"API & Manual" => Any[
"instructions.md",
"extend.md",
"api.md",
"faq.md",
]
],
repo="https://github.com/GiggleLiu/NiLang.jl/blob/{commit}{path}#L{line}",
sitename="NiLang.jl",
authors="JinGuo Liu, thautwarm",
)
# import PlutoUtils
# PlutoUtils.Export.github_action(; notebook_dir=NiLang.project_relative_path("notebooks"), offer_binder=false, export_dir=NiLang.project_relative_path("docs", "build", "notebooks"), generate_default_index=false, project=NiLang.project_relative_path("docs"))
deploydocs(;
repo="github.com/GiggleLiu/NiLang.jl.git",
)
================================================
FILE: docs/src/api.md
================================================
```@meta
DocTestSetup = quote
using NiLangCore, NiLang, NiLang.AD, Test
end
```
# API Manual
## Compiling Tools (Reexported from NiLangCore)
```@autodocs
Modules = [NiLangCore]
Order = [:macro, :function, :type]
```
## Instructions
```@autodocs
Modules = [NiLang]
Order = [:macro, :function, :type]
```
## Automatic Differentiation
```@autodocs
Modules = [NiLang.AD]
Order = [:macro, :function, :type]
```
================================================
FILE: docs/src/extend.md
================================================
# How to extend
## Extend `+=`, `-=` and `⊻=` for irreversible one-out functions
It directly works
```julia
julia> using SpecialFunctions, NiLang
julia> x, y = 2.1, 1.0
(2.1, 1.0)
julia> @instr y += besselj0(x)
2.1
julia> x, y
(2.1, 1.7492472503018073)
julia> @instr ~(y += besselj0(x))
2.1
julia> x, y
(2.1, 1.0)
```
Here the statement
```julia
@instr y += besselj0(x)
```
is mapped to a call of the function object
```julia
PlusEq(besselj0)(y, x)
```
However, doing this does not give you correct gradients.
For `y += scalar_out_function(x)`, one can bind the backward rules like
```julia
julia> using ChainRules, NiLang.AD
julia> besselj0_back(x) = ChainRules.rrule(besselj0, x)[2](1.0)[2]
besselj0_back (generic function with 1 method)
julia> primitive_grad(::typeof(besselj0), x::Real) = besselj0_back(x)
primitive_grad (generic function with 1 method)
julia> xg, yg = GVar(x), GVar(y, 1.0)
(GVar(2.1, 0.0), GVar(1.0, 1.0))
julia> @instr yg -= besselj0(xg)
GVar(2.1, -0.5682921357570385)
julia> xg, yg
(GVar(2.1, -0.5682921357570385), GVar(0.8333930196680097, 1.0))
julia> @instr yg += besselj0(xg)
GVar(2.1, 0.0)
julia> xg, yg
(GVar(2.1, 0.0), GVar(1.0, 1.0))
julia> NiLang.AD.check_grad(PlusEq(besselj0), (1.0, 2.1); iloss=1)
true
julia> using BenchmarkTools
julia> @benchmark PlusEq(besselj0)($yg, $xg)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 451.523 ns (0.00% GC)
median time: 459.431 ns (0.00% GC)
mean time: 477.419 ns (0.00% GC)
maximum time: 857.036 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 197
```
Good!
## Reversible multi-in multi-out functions
It is easy to do, define two normal Julia functions reversible to each other,
using the macro `@dual` to tell the compiler they are reversible to each other.
For example, a pair of dual functions `ROT` (2D rotation) and `IROT` (inverse rotation) that already defined in NiLang.
```julia
"""
ROT(a!, b!, θ) -> a!', b!', θ
"""
@inline function ROT(i::Real, j::Real, θ::Real)
a, b = rot(i, j, θ)
a, b, θ
end
"""
IROT(a!, b!, θ) -> ROT(a!, b!, -θ)
"""
@inline function IROT(i::Real, j::Real, θ::Real)
i, j, _ = ROT(i, j, -θ)
i, j, θ
end
@dual ROT IROT
```
One can easily check the reversibility by typing
```julia
julia> check_inv(ROT, (1.0, 2.0, 3.0))
true
```
For self-reversible functions, one can declare the reversibility for it like this
```julia
"""
SWAP(a!, b!) -> b!, a!
"""
@inline function SWAP(a!::Real, b!::Real)
b!, a!
end
@selfdual SWAP
```
To bind gradients for this multi-in, multi-out function.
The general approach is *Binding the backward rule on its inverse*!
```julia
@i @inline function IROT(a!::GVar, b!::GVar, θ::GVar)
IROT(a!.x, b!.x, θ.x)
NEG(θ.x)
θ.x -= π/2
ROT(a!.g, b!.g, θ.x)
θ.g += a!.x * a!.g
θ.g += b!.x * b!.g
θ.x += π/2
NEG(θ.x)
ROT(a!.g, b!.g, π/2)
end
@i @inline function IROT(a!::GVar, b!::GVar, θ::Real)
IROT(a!.x, b!.x, θ)
NEG(θ)
θ -= π/2
ROT(a!.g, b!.g, θ)
θ += π/2
NEG(θ)
ROT(a!.g, b!.g, π/2)
end
@nograd IROT(a!::Real, b!::Real, θ::GVar)
```
When this inverse function is called, the backward rules are automatically applied.
Good! This method can also be extended to linear algebra functions, however, the memory allocation overhead is high because one need to wrap each element with `GVar`.
================================================
FILE: docs/src/faq.md
================================================
## Why reversibility check fails even though the program is reversible?
Because floating-point arithmetic is not exactly reversible, the invertibility check might sometimes fail due to rounding error.
To fix this issue, you may want to make the check less restrictive
```julia
NiLangCore.GLOBAL_ATOL[] = 1e-6 # default is 1e-8
```
Or just turn off the check in the program (only if you are sure the program is correct)
```julia
@routine @invcheckoff begin
...
end
```
Turning off the check will make your program faster too!
## What makes the gradient check fail?
##### Finite difference error due to numeric instability
The `NiLang.AD.check_grad` function sometimes fails due to either rounding error or finite-difference error; you may want to check the gradient manually with the `NiLang.AD.ng` function (numeric gradient).
```julia
julia> NiLang.AD.ng(jin, copy.((out,b,ma,jinzhi,spread,bili)), 6; iloss=1, δ=1e-4)
-5449.643843214744
julia> NiLang.AD.ng(jin, copy.((out,b,ma,jinzhi,spread,bili)), 5; iloss=1, δ=1e-4)
4503-element Array{Float64,1}:
-0.0023380584934784565
-0.0021096593627589755
-0.0019811886886600405
⋮
-0.009526640951662557
-0.006004695478623034
0.0
```
and
```julia
julia> NiLang.AD.gradient(Val(1), jin, copy.((out,b,ma,jinzhi,spread,bili)))[end]
-5449.643116967733
julia> NiLang.AD.gradient(Val(1), jin, copy.((out,b,ma,jinzhi,spread,bili)))[end-1]
4503-element Array{Float64,1}:
-0.0005285958114468947
-0.00030225263725219137
-0.00017545437275561654
⋮
-0.010422627668532736
-0.0069140339974312695
0.0
```
Here, we can see the `jin` function is numerically sensitive to perturbations, which makes the numeric gradient incorrect.
The above code is from https://github.com/HanLi123/NiLang/issues/3
##### Allocating a non-constant ancilla
Another possibility is, a non-constant ancilla is allocated.
```julia
julia> @i function f1(z, y)
x ← y # wrong!
z += x
x → y
end
julia> NiLang.AD.gradient(Val(1), f1, (0.0, 1.0))
(1.0, 0.0)
julia> @i function f2(z, y)
x ← zero(y)
x += y
z += x
x -= y
x → zero(y)
end
julia> NiLang.AD.gradient(Val(1), f2, (0.0, 1.0))
(1.0, 1.0)
```
`f1` will give incorrect gradient because when ancilla `x` is deallocated, its gradient field will also be discarded.
================================================
FILE: docs/src/grammar.md
================================================
# NiLang Grammar
To define a reversible function one can use the macro **@i** plus a function definition, as below
```julia
"""
docstring...
"""
@i function f(args..., kwargs...) where {...}
| | AD on tensors | AD on finite instructions |
|---|---|---|
| meaning | defining backward rules manually for functions on tensors | defining backward rules on a limited set of basic scalar operations, and generating gradient code using source-code transformation |
| packages | Jax, PyTorch | Tapenade, Adept, NiLang.jl |
""", md"**Evaluating derivatives: principles and techniques of algorithmic differentiation**
By: Griewank, Andreas, and Andrea Walther
(2008)")
# ╔═╡ 4ff09f7c-aeac-48bd-9d58-8446137c3acd
md"""
## The AD ecosystem in Julia
Please check JuliaDiff: [https://juliadiff.org/](https://juliadiff.org/)
A short list:
* Forward mode AD: ForwardDiff.jl
* Reverse mode AD (tensor): ReverseDiff.jl/Zygote.jl
* Reverse mode AD (scalar): NiLang.jl
Warnings
* The main authors of `Tracker`, `ReverseDiff` and `Zygote` are not maintaining them anymore.
"""
#=
| | Rules | Favors Tensor? | Type |
| ---- | ---- | --- | --- |
| Zygote | C | ✓ | R |
| ReverseDiff | D | ✓ | R |
| Nabla | D→C | ✓ | R |
| Tracker | D | ✓ | R |
| Yota | C | ✓ | R |
| NiLang | - | × | R |
| Enzyme | - | × | R |
| ForwardDiff | - | × | F |
| Diffractor | ? | ? | ? |
* R: reverse mode
* F: forward mode
* C: ChainRules
* D: DiffRules
"""
=#
# ╔═╡ ea44037b-9359-4fbd-990f-529d88d54351
md"# Quick summary
1. The history of AD is longer than many people have thought. People are most familiar with *reverse mode AD with primitives implemented on tensors*, which brought the boom of machine learning. There are also AD frameworks that can differentiate a general program directly, which do not require users to define AD rules manually.
2. **Forward mode AD** propagates gradients forward; it has a computational overhead proportional to the number of input parameters.
3. **Backward mode AD** propagates gradients backward; it has a computational overhead proportional to the number of output parameters.
* primitives on **tensors** v.s. **scalars**
* it is very expensive to reverse the program
4. Julia has one of the most active AD community!
#### Forward v.s. Backward
when is forward mode AD more useful?
* It is often combined with backward mode AD for obtaining Hessians (forward over backward).
* Having <20 input parameters.
when is backward mode AD more useful?
* In most variational optimizations, especially when we are training a neural network with ~ 100M parameters.
"
# ╔═╡ e731a8e3-6462-4a60-83e9-6ab7ddfff50e
md"# How do AD libraries work?"
# ╔═╡ 685c2b28-b071-452c-a881-801128dcb6c3
md"`ForwardDiff` is operator overloading based, many of its overheads can be optimized by Julia's JIT compiler."
# ╔═╡ 177ddfc2-2cbe-4dba-9d05-2857633dd1ae
md"# [Tapenade](http://tapenade.inria.fr:8080/tapenade/index.jsp)
"
# ╔═╡ 6c2a3a93-385f-4758-9b6e-4cb594a8e856
md"## Example 1: Bessel Example"
# ╔═╡ fb8168c2-8489-418b-909b-cede57b5ae64
md"bessel.f90"
# ╔═╡ fdb39284-dbb1-49fa-9a1c-f360f9e6b765
md"""
```fortran
subroutine besselj(res, v, z, atol)
implicit none
integer, intent(in) :: v
real*8, intent(in) :: z, atol
real*8, intent(out) :: res
real*8 :: s
integer :: k, i, factv
k = 0
factv = 1
do i = 2,v
factv = factv * i
enddo
s = (z/2.0)**v / factv
res = s
do while(abs(s) > atol)
k = k + 1
s = -s / k / (k+v) * ((z/2) ** 2)
res = res + s
enddo
endsubroutine besselj
```
"""
# ╔═╡ 60214f22-c8bb-4a32-a882-4e6c727b29a9
md"""
besselj_d.f90 (forward mode)
```fortran
! Generated by TAPENADE (INRIA, Ecuador team)
! Tapenade 3.15 (master) - 15 Apr 2020 11:54
!
! Differentiation of besselj in forward (tangent) mode:
! variations of useful results: res
! with respect to varying inputs: z
! RW status of diff variables: res:out z:in
SUBROUTINE BESSELJ_D(res, resd, v, z, zd, atol)
IMPLICIT NONE
INTEGER, INTENT(IN) :: v
REAL*8, INTENT(IN) :: z, atol
REAL*8, INTENT(IN) :: zd
REAL*8, INTENT(OUT) :: res
REAL*8, INTENT(OUT) :: resd
REAL*8 :: s
REAL*8 :: sd
INTEGER :: k, i, factv
INTRINSIC ABS
REAL*8 :: abs0
REAL*8 :: pwx1
REAL*8 :: pwx1d
REAL*8 :: pwr1
REAL*8 :: pwr1d
INTEGER :: temp
k = 0
factv = 1
DO i=2,v
factv = factv*i
END DO
pwx1d = zd/2.0
pwx1 = z/2.0
IF (pwx1 .LE. 0.0 .AND. (v .EQ. 0.0 .OR. v .NE. INT(v))) THEN
pwr1d = 0.0_8
ELSE
pwr1d = v*pwx1**(v-1)*pwx1d
END IF
pwr1 = pwx1**v
sd = pwr1d/factv
s = pwr1/factv
resd = sd
res = s
DO WHILE (.true.)
IF (s .GE. 0.) THEN
abs0 = s
ELSE
abs0 = -s
END IF
IF (abs0 .GT. atol) THEN
k = k + 1
temp = k*(k+v)*(2*2)
sd = -((z**2*sd+s*2*z*zd)/temp)
s = -(s*(z*z)/temp)
resd = resd + sd
res = res + s
ELSE
EXIT
END IF
END DO
END SUBROUTINE BESSELJ_D
```
besselj_b.f90 (backward mode)
```fortran
! Generated by TAPENADE (INRIA, Ecuador team)
! Tapenade 3.15 (master) - 15 Apr 2020 11:54
!
! Differentiation of besselj in reverse (adjoint) mode:
! gradient of useful results: res z
! with respect to varying inputs: res z
! RW status of diff variables: res:in-zero z:incr
SUBROUTINE BESSELJ_B(res, resb, v, z, zb, atol)
IMPLICIT NONE
INTEGER, INTENT(IN) :: v
REAL*8, INTENT(IN) :: z, atol
REAL*8 :: zb
REAL*8 :: res
REAL*8 :: resb
REAL*8 :: s
REAL*8 :: sb
INTEGER :: k, i, factv
INTRINSIC ABS
REAL*8 :: abs0
REAL*8 :: tempb
INTEGER :: ad_count
INTEGER :: i0
INTEGER :: branch
k = 0
factv = 1
DO i=2,v
factv = factv*i
END DO
s = (z/2.0)**v/factv
ad_count = 1
DO WHILE (.true.)
IF (s .GE. 0.) THEN
abs0 = s
ELSE
abs0 = -s
END IF
IF (abs0 .GT. atol) THEN
CALL PUSHINTEGER4(k)
k = k + 1
CALL PUSHREAL8(s)
s = -(s/k/(k+v)*(z/2)**2)
ad_count = ad_count + 1
ELSE
GOTO 100
END IF
END DO
CALL PUSHCONTROL1B(0)
GOTO 110
100 CALL PUSHCONTROL1B(1)
110 DO i0=1,ad_count
IF (i0 .EQ. 1) THEN
CALL POPCONTROL1B(branch)
IF (branch .EQ. 0) THEN
sb = 0.0_8
ELSE
sb = 0.0_8
END IF
ELSE
sb = sb + resb
CALL POPREAL8(s)
tempb = -(sb/(k*(k+v)*2**2))
sb = z**2*tempb
zb = zb + 2*z*s*tempb
CALL POPINTEGER4(k)
END IF
END DO
sb = sb + resb
IF (.NOT.(z/2.0 .LE. 0.0 .AND. (v .EQ. 0.0 .OR. v .NE. INT(v)))) zb = &
& zb + v*(z/2.0)**(v-1)*sb/(2.0*factv)
resb = 0.0_8
END SUBROUTINE BESSELJ_B
```
"""
# ╔═╡ 7a6dbe09-cb7f-405f-b9b5-b350ca170e5f
md"## Example 2: Matrix multiplication"
# ╔═╡ 5dc4a849-76dd-4c4f-8828-755671839e5e
md"""
matmul_b.f90
```fortran
! Generated by TAPENADE (INRIA, Ecuador team)
! Tapenade 3.16 (develop) - 9 Apr 2021 17:40
!
! Differentiation of mymatmul in reverse (adjoint) mode:
! gradient of useful results: x y z
! with respect to varying inputs: x y z
! RW status of diff variables: x:incr y:incr z:in-out
SUBROUTINE MYMATMUL_B(z, zb, x, xb, y, yb, m, n, o)
IMPLICIT NONE
INTEGER, INTENT(IN) :: m, n, o
REAL*8, DIMENSION(:, :) :: z(m, n)
REAL*8 :: zb(m, n)
REAL*8, DIMENSION(:, :), INTENT(IN) :: x(m, o), y(o, n)
REAL*8 :: xb(m, o), yb(o, n)
REAL*8 :: temp
REAL*8 :: tempb
INTEGER :: i, j, k
DO j=n,1,-1
DO i=m,1,-1
tempb = zb(i, j)
zb(i, j) = 0.0_8
DO k=o,1,-1
xb(i, k) = xb(i, k) + y(k, j)*tempb
yb(k, j) = yb(k, j) + x(i, k)*tempb
END DO
END DO
END DO
END SUBROUTINE MYMATMUL_B
```
"""
# ╔═╡ b053f11b-9ed7-47ff-ab32-0c70b87e71ed
md"## Example 3: Pyramid"
# ╔═╡ 7b1aa6dd-647f-44cb-b580-b58e23e8b5a6
html"""
"""
# ╔═╡ b96bac75-b4ad-45f7-aeec-cb6a387eebf0
md"You will see a lot allocation"
# ╔═╡ 5fe022eb-6a17-466e-a6d0-d67e82af23cd
md"pyramid.f90"
# ╔═╡ 92047e95-7eba-4021-9668-9bb4b92261d7
md"""
```fortran
! Differentiation of pyramid in reverse (adjoint) mode:
! gradient of useful results: v x
! with respect to varying inputs: v x
! RW status of diff variables: v:in-out x:incr
SUBROUTINE PYRAMID_B(v, vb, x, xb, n)
IMPLICIT NONE
INTEGER, INTENT(IN) :: n
REAL*8 :: v(n, n)
REAL*8 :: vb(n, n)
REAL*8, INTENT(IN) :: x(n)
REAL*8 :: xb(n)
INTEGER :: i, j
INTRINSIC SIN
INTRINSIC COS
INTEGER :: ad_to
DO j=1,n
v(1, j) = x(j)
END DO
DO i=1,n-1
DO j=1,n-i
CALL PUSHREAL8(v(i+1, j))
v(i+1, j) = SIN(v(i, j))*COS(v(i, j+1))
END DO
CALL PUSHINTEGER4(j - 1)
END DO
DO i=n-1,1,-1
CALL POPINTEGER4(ad_to)
DO j=ad_to,1,-1
CALL POPREAL8(v(i+1, j))
vb(i, j) = vb(i, j) + COS(v(i, j))*COS(v(i, j+1))*vb(i+1, j)
vb(i, j+1) = vb(i, j+1) - SIN(v(i, j+1))*SIN(v(i, j))*vb(i+1, j)
vb(i+1, j) = 0.0_8
END DO
END DO
DO j=n,1,-1
xb(j) = xb(j) + vb(1, j)
vb(1, j) = 0.0_8
END DO
END SUBROUTINE PYRAMID_B
```
"""
# ╔═╡ e2ae1084-8759-4f27-8ad1-43a88e434a3d
md"## How does NiLang avoid too many allocation?"
# ╔═╡ edd3aea8-abdb-4e12-9ef9-12ac0fff835b
# Reversible pyramid recurrence: row 1 of v! receives x, and each subsequent
# (one-shorter) row is v![i+1,j] += cos(v![i,j+1]) * sin(v![i,j]).  The cos/sin
# ancillas are computed by @routine and uncomputed by ~@routine inside the
# loop, so no per-iteration state is kept.  The apex value lands in y!.
@i function pyramid!(y!, v!, x::AbstractVector{T}) where T
@safe @assert size(v!,2) == size(v!,1) == length(x)
@inbounds for j=1:length(x)
v![1,j] += x[j]
end
@invcheckoff @inbounds for i=1:size(v!,1)-1
for j=1:size(v!,2)-i
@routine begin
@zeros T c s
c += cos(v![i,j+1])
s += sin(v![i,j])
end
v![i+1,j] += c * s
~@routine
end
end
y! += v![end,1]
end
# ╔═╡ a2904efb-186c-449d-b1aa-caf530f88e91
# Reversible cube: x3 += x^3, via an intermediate ancilla x2 = x^2 that is
# uncomputed by ~@routine, so only x3 is changed.
@i function power(x3, x)
@routine begin
x2 ← zero(x)
x2 += x^2
end
x3 += x2 * x
~@routine
end
# ╔═╡ 14faaf82-ad3e-4192-8d48-84adfa30442d
ex = NiLangCore.precom_ex(NiLang, :(for j=1:size(v!,2)-i
@routine begin
@zeros T c s
c += cos(v![i,j+1])
s += sin(v![i,j])
end
v![i+1,j] += c * s
~@routine
end)) |> NiLangCore.rmlines
# ╔═╡ 5d141b88-ec07-4a02-8eb3-37405e5c9f5d
NiLangCore.dual_ex(NiLang, ex)
# ╔═╡ 0907e683-f216-4cf6-a210-ae5181fdc487
"""
    pyramid0!(v!, x)

Irreversible reference implementation of the pyramid recurrence: the first
row of `v!` is overwritten with `x`, then each subsequent row (one shorter
than the last) is filled via `v![i+1,j] = cos(v![i,j+1]) * sin(v![i,j])`.
`v!` must be square with side `length(x)`.
"""
function pyramid0!(v!, x::AbstractVector{T}) where T
    n = length(x)
    @assert size(v!, 2) == size(v!, 1) == n
    # Seed the first row with the input vector.
    for col = 1:n
        v![1, col] = x[col]
    end
    # Each row below has one fewer valid entry than the row above.
    @inbounds for row = 1:n-1, col = 1:n-row
        v![row+1, col] = cos(v![row, col+1]) * sin(v![row, col])
    end
end
# ╔═╡ 0bbfa106-f465-4a7b-80a7-7732ba435822
x = randn(20);
# ╔═╡ 805c7072-98fa-4086-a69d-2e126c55af36
let
@benchmark pyramid0!(v, x) seconds=1 setup=(x=randn(1000); v=zeros(1000, 1000))
end
# ╔═╡ 7e527024-c294-4c16-8626-9953588d9b6a
let
@benchmark pyramid!(0.0, v, x) seconds=1 setup=(x=10*randn(1000); v=zeros(1000, 1000))
end
# ╔═╡ 3e59c65a-ceed-42ed-be64-a6964db016e7
pyramid!(0.0, zeros(20, 20), x)
# ╔═╡ 29f85d05-99fd-4843-9be0-5663e681dad7
html"""
"""
# ╔═╡ e7830e55-bd9e-4a8a-9239-4191a5f0b1d1
let
@benchmark NiLang.AD.gradient(Val(1), pyramid!, (0.0, v, x)) seconds=1 setup=(x=randn(1000); v=zeros(1000, 1000))
end
# ╔═╡ de2cd247-ba68-4ba4-9784-27a743478635
md"## NiLang's implementation"
# ╔═╡ dc929c23-7434-4848-847a-9fa696e84776
md"""
```math
\begin{align}
&v_{−1} &= & x_1 &=&1.5000\\
&v_0 &= & x_2 &=&0.5000\\
&v_1 &= & v_{−1}/v_0 &=&1.5000/0.5000 &= 3.0000\\
&v_2 &= & \sin(v1)&=& \sin(3.0000) &= 0.1411\\
&v_3 &= & \exp(v0)&=& \exp(0.5000) &= 1.6487\\
&v_4 &= & v_1 − v_3 &=&3.0000 − 1.6487 &= 1.3513\\
&v_5 &= & v_2 + v_4 &=&0.1411 + 1.3513 &= 1.4924\\
&v_6 &= & v_5 ∗ v_4 &=&1.4924 ∗ 1.3513 &= 2.0167\\
&y &= & v_6 &=&2.0167
\end{align}
```
"""
# ╔═╡ 4f1df03f-c315-47b1-b181-749e1231594c
html"""
"""
# ╔═╡ 7eccba6a-3ad5-440b-9c5d-392dc8dc7aba
# Reversible version of the classic Griewank & Walther evaluation-trace
# example: y += (sin(x1/x2) + x1/x2 - exp(x2)) * (x1/x2 - exp(x2)).
# Every intermediate v_i is an ancilla, uncomputed by ~@routine.
@i function example_linear(y::T, x1::T, x2::T) where T
@routine begin
@zeros T v1 v2 v3 v4 v5
v1 += x1 / x2
v2 += sin(v1)
v3 += exp(x2)
v4 += v1 - v3
v5 += v2 + v4
end
y += v5 * v4
~@routine
end
# ╔═╡ 4a858a3e-ce28-4642-b061-3975a3ed99ff
md"NOTES:
* a statement changes values inplace directly,
* no return statement, returns the input arguments directly
* `@routine
"""
# ╔═╡ 2192a1de-1042-4b13-a313-b67de489124c
md"""
1. Divide the program into ``\delta`` segments, each segment having size $\eta(\delta, \tau) = \frac{(\delta+\tau)!}{\delta! \tau!}$, where ``\delta=1,...,d`` and ``\tau=t-1``.
2. Cache the first state of each segment,
3. Compute gradients in the last segment,
4. Deallocate the last checkpoint,
5. Divide the second-to-last segment into two parts.
6. Recursively apply treeverse (Step 2-5).
"""
# ╔═╡ 01c709c7-806c-4389-bbb2-4081e64426d9
md"total number of steps ``T = \eta(d, t)``, both ``t`` and ``d`` can be logarithmic"
# ╔═╡ b1e0cf83-4337-4044-a7d1-5fca8ae79268
md"## An example"
# ╔═╡ 71f4b476-027d-4c8f-b561-1ee418bc9e61
html"""
", md"**Matrix computations**
Golub, Gene H., and Charles F. Van Loan (2013)")
# ╔═╡ 4d373cf6-9b39-44bc-8f13-220933fc8f5c
# Irreversible reference: textbook unblocked column-pivoted Householder QR,
# adapted from Julia's LinearAlgebra internals.  Overwrites A with the packed
# factors and returns a QRPivoted.  NOTE(review): `indmaxcolumn` and `BlasInt`
# are LinearAlgebra internals assumed to be in scope in the notebook.
function qrfactPivotedUnblocked!(A::AbstractMatrix)
m, n = size(A)
piv = Vector(UnitRange{BlasInt}(1,n))
τ = Vector{eltype(A)}(undef, min(m,n))
for j = 1:min(m,n)
# Find column with maximum norm in trailing submatrix
jm = indmaxcolumn(view(A, j:m, j:n)) + j - 1
if jm != j
# Flip elements in pivoting vector
tmpp = piv[jm]
piv[jm] = piv[j]
piv[j] = tmpp
# Update matrix with
for i = 1:m
tmp = A[i,jm]
A[i,jm] = A[i,j]
A[i,j] = tmp
end
end
# Compute reflector of columns j
x = view(A, j:m, j)
τj = LinearAlgebra.reflector!(x)
τ[j] = τj
# Update trailing submatrix with reflector
LinearAlgebra.reflectorApply!(x, τj, view(A, j:m, j+1:n))
end
return LinearAlgebra.QRPivoted{eltype(A), typeof(A)}(A, τ, piv)
end
# ╔═╡ 293a68ca-e02f-47b3-85ed-aeeb8995f3ec
# Workspace holding one Householder reflection as produced by `reflector!`:
#   ξ       — shifted leading element of the reflected column
#   normu   — signed column norm (real type RT), sign chosen in reflector!
#   sqnormu — Σ|uᵢ|² accumulated before the square root
#   r       — reflection coefficient ξ/normu (becomes τ in qr_pivoted!)
#   y       — the reflector vector
struct Reflector{T,RT,VT<:AbstractVector{T}}
ξ::T
normu::RT
sqnormu::RT
r::T
y::VT
end
# ╔═╡ fa5716f9-8bff-4295-812b-691ccdc12832
# Result plus reversibility bookkeeping for `qr_pivoted!`:
#   factors    — packed QR factors (as in LinearAlgebra.QRPivoted)
#   τ          — Householder coefficients, copied from the reflectors
#   jpvt       — column pivot permutation
#   reflectors — per-column Reflector workspaces, kept so the factorization
#                can be reversed / differentiated
#   vAs        — per-column v'A products saved by reflectorApply!
#   jms        — pivot column index chosen at each step
struct QRPivotedRes{T,RT,VT}
factors::Matrix{T}
τ::Vector{T}
jpvt::Vector{Int}
reflectors::Vector{Reflector{T,RT,VT}}
vAs::Vector{Vector{T}}
jms::Vector{Int}
end
# ╔═╡ 8324f365-fd12-4ca3-8ca6-657e5917f946
# Elementary reflection similar to LAPACK. The reflector is not Hermitian but
# ensures that tridiagonalization of Hermitian matrices become real. See lawn72
# Reversible Householder reflector (in the spirit of LAPACK xLARFG): reads
# column `x` and fills the workspace `R` with the reflector vector `R.y`,
# coefficient `R.r`, and the scalars computed along the way, so everything
# can later be uncomputed.  `x` itself is left unchanged.
@i function reflector!(R::Reflector{T,RT}, x::AbstractVector{T}) where {T,RT}
n ← length(x)
@inbounds @invcheckoff if n != 0
@zeros T ξ1
@zeros RT normu sqnormu
ξ1 += x[1]
sqnormu += abs2(ξ1)
for i = 2:n
sqnormu += abs2(x[i])
end
if !iszero(sqnormu)
normu += sqrt(sqnormu)
# Flip the sign of normu when real(ξ1) < 0 so ξ1 += normu below
# cannot cancel catastrophically.
if real(ξ1) < 0
NEG(normu)
end
ξ1 += normu
R.y[1] -= normu
for i = 2:n
R.y[i] += x[i] / ξ1
end
R.r += ξ1/normu
end
# Park the scalars in R; SWAP with R's (assumed pre-zeroed, see `alloc`)
# fields leaves the ancillas zero so @zeros can deallocate them.
SWAP(R.ξ, ξ1)
SWAP(R.normu, normu)
SWAP(R.sqnormu, sqnormu)
end
end
# ╔═╡ 70fb10ea-9229-46ef-8ba3-b1d3874b7929
# apply reflector from left
# Reversible application of a Householder reflector from the left, column by
# column: A[:,j] -= τ'*(v'A[:,j]) * v, where v has an implicit leading 1 and
# tail x[2:end].  The dot products v'A[:,j] are parked in `vA` (one entry per
# column) rather than uncomputed, since the reverse pass needs them.
@i function reflectorApply!(vA::AbstractVector{T}, x::AbstractVector, τ::Number, A::StridedMatrix{T}) where T
(m, n) ← size(A)
if length(x) != m || length(vA) != n
@safe throw(DimensionMismatch("reflector has length ($(length(x)), $(length(vA))), which must match the first dimension of matrix A, ($m, $n)"))
end
@inbounds @invcheckoff if m != 0
for j = 1:n
# dot
@zeros T vAj vAj_τ
vAj += A[1, j]              # implicit leading 1 of the reflector
for i = 2:m
vAj += x[i]'*A[i, j]
end
vAj_τ += τ' * vAj
# ger
A[1, j] -= vAj_τ
for i = 2:m
A[i, j] -= x[i]*vAj_τ
end
vAj_τ -= τ' * vAj           # uncompute vAj_τ (vAj is unchanged)
SWAP(vA[j], vAj)            # stash vAj in the output, zeroing the ancilla
end
end
end
# ╔═╡ 51504ba4-4711-48b7-aab9-d4f26c009659
"""
    alloc(::typeof(reflector!), x)

Allocate a zero-initialized `Reflector` workspace matching the element type
of `x`, suitable as the output argument of `reflector!`.
"""
function alloc(::typeof(reflector!), x::AbstractVector{T}) where T
    realzero = zero(real(T))
    return Reflector(zero(T), realzero, realzero, zero(T), zero(x))
end
# ╔═╡ f267e315-3c19-4345-8fba-641bb0ea515b
# Reversible column-pivoted QR (mirrors qrfactPivotedUnblocked! above).  The
# garbage needed to reverse each step — the column's Reflector, the v'A
# products, and the pivot index — is pushed onto the stacks in `res` instead
# of being uncomputed.
@i function qr_pivoted!(res::QRPivotedRes, A::StridedMatrix{T}) where T
m, n ← size(A)
@invcheckoff @inbounds for j = 1:min(m,n)
# Find column with maximum norm in trailing submatrix
jm ← LinearAlgebra.indmaxcolumn(NiLang.value.(view(A, j:m, j:n))) + j - 1
if jm != j
# Flip elements in pivoting vector
SWAP(res.jpvt[jm], res.jpvt[j])
# Update matrix with
for i = 1:m
SWAP(A[i, jm], A[i, j])
end
end
# Compute reflector of columns j
R ← alloc(reflector!, A |> subarray(j:m, j))
vA ← zeros(T, n-j)
reflector!(R, A |> subarray(j:m, j))
# Update trailing submatrix with reflector
reflectorApply!(vA, R.y, R.r, A |> subarray(j:m, j+1:n))
# Write the reflector vector into A's lower triangle (LAPACK packing).
for i=1:length(R.y)
SWAP(R.y[i], A[j+i-1, j])
end
PUSH!(res.reflectors, R)
PUSH!(res.vAs, vA)
PUSH!(res.jms, jm)
# Deallocate the loop-local bindings; their contents now live in `res`.
R → _zero(Reflector{T,real(T),Vector{T}})
vA → zeros(T, 0)
jm → 0
end
# Copy each reflection coefficient into τ, then hand the packed factors over.
@inbounds for i=1:length(res.reflectors)
res.τ[i] += res.reflectors[i].r
end
res.factors += A
end
# ╔═╡ a07b93b1-742b-41d4-bd0f-bc899de55338
# Build an empty `QRPivotedRes` container for a pivoted QR of `A`:
# zeroed factors, min(m,n) τ slots, the identity pivot permutation, and
# empty stacks for the per-step reflectors, vA buffers, and pivot indices.
function alloc_qr(A::AbstractMatrix{T}) where T
    nrow, ncol = size(A)
    QRPivotedRes(
        zero(A),                           # factors
        zeros(T, min(nrow, ncol)),         # τ coefficients
        collect(1:ncol),                   # jpvt: identity permutation
        Reflector{T,real(T),Vector{T}}[],  # reflector stack
        Vector{T}[],                       # vA stack
        Int[],                             # pivot-index stack
    )
end
# ╔═╡ 5f207f59-b9f4-477f-b79f-0aee743bdb8e
# Random complex test matrix for the reversible pivoted-QR check below.
A = randn(ComplexF64, 20, 20);
# ╔═╡ f88517d6-b87d-45ba-bf3f-67074fa51fca
# The reversible factorization must reproduce LinearAlgebra's unblocked pivoted QR factors.
@test qr_pivoted!(alloc_qr(A), copy(A))[1].factors ≈ LinearAlgebra.qrfactPivotedUnblocked!(copy(A)).factors
# ╔═╡ 45aef837-9b2c-49b2-b815-e4d60f103f58
# Verify the NiLang-AD gradient of a loss built on qr_pivoted! against a
# central finite-difference estimate, using a deliberately rank-deficient input.
let
    @testset "qr pivoted gradient" begin
        # rank deficient initial matrix
        n = 50
        U = LinearAlgebra.qr(randn(n, n)).Q
        # Zero out half the singular values so rank(A) == n ÷ 2.
        Σ = Diagonal((x=randn(n); x[n÷2+1:end] .= 0; x))
        A = U*Σ*U'
        res = alloc_qr(A)
        @test rank(A) == n ÷ 2
        qrres = qr_pivoted!(deepcopy(res), copy(A))[1]
        # With column pivoting, exactly rank(A) rows of R should be nonzero.
        @test count(x->(x>1e-12), sum(abs2, QRPivoted(qrres.factors, qrres.τ, qrres.jpvt).R, dims=2)) == n ÷ 2
        # Scalar loss: magnitude of the leading factor entry.
        @i function loss(y, qrres, A)
            qr_pivoted!(qrres, A)
            y += abs(qrres.factors[1])
        end
        # Non-reversible wrapper returning just the loss value.
        nrloss(A) = loss(0.0, deepcopy(res), A)[1]
        # Central finite-difference gradient with step δ.
        ngA = zero(A)
        δ = 1e-5
        for j=1:size(A, 2)
            for i=1:size(A, 1)
                A_ = copy(A)
                A_[i,j] -= δ/2
                l1 = nrloss(copy(A_))
                A_[i,j] += δ
                l2 = nrloss(A_)
                ngA[i,j] = (l2-l1)/δ
            end
        end
        # Reversible-AD gradient w.r.t. the third argument (iloss=1 marks y as the loss).
        gA = NiLang.AD.gradient(loss, (0.0, res, A); iloss=1)[3]
        # A is real symmetric here, so compare real parts against the numerical gradient.
        @test real.(gA) ≈ ngA
    end
end
# ╔═╡ Cell order:
# ╟─a1ef579e-4b66-4042-944e-7e27c660095e
# ╟─100b4293-fd1e-4b9c-a831-5b79bc2a5ebe
# ╟─f11023e5-8f7b-4f40-86d3-3407b61863d9
# ╟─9d11e058-a7d0-11eb-1d78-6592ff7a1b43
# ╟─b73157bf-1a77-47b8-8a06-8d6ec2045023
# ╟─ec13e0a9-64ff-4f66-a5a6-5fef53428fa1
# ╟─f8b0d1ce-99f7-4729-b46e-126da540cbbe
# ╟─435ac19e-1c0c-4ee5-942d-f2a97c8c4d80
# ╟─48ecd619-d01d-43ff-8b52-7c2566c3fa2b
# ╟─4878ce45-40ff-4fae-98e7-1be41e930e4d
# ╠═ce44f8bd-692e-4eab-9ba4-055b25e40c81
# ╠═b2c1936c-2c27-4fbb-8183-e38c5e858483
# ╠═8be1b812-fcac-404f-98aa-0571cb990f34
# ╟─33e0c762-c75e-44aa-bfe2-bff92dd1ace8
# ╟─c59c35ee-1907-4736-9893-e22c052150ca
# ╠═0ae13734-b826-4dbf-93d1-11044ce88bd4
# ╠═99187515-c8be-49c2-8d70-9c2998d9993c
# ╟─78ca6b08-84c4-4e4d-8412-ae6c28bfafce
# ╠═f12b25d8-7c78-4686-b46d-00b34e565605
# ╟─d90c3cc9-084d-4cf7-9db7-42cea043030b
# ╟─93c98cb2-18af-47df-afb3-8c5a34b4723c
# ╟─2dc74e15-e2ea-4961-b43f-0ada1a73d80a
# ╟─7ee75a15-eaea-462a-92b6-293813d2d4d7
# ╟─02a25b73-7353-43b1-8738-e7ca472d0cc7
# ╟─2afb984f-624e-4381-903f-ccc1d8a66a17
# ╟─7e5d5e69-90f2-4106-8edf-223c150a8168
# ╟─92d7a938-9463-4eee-8839-0b8c5f762c79
# ╟─4b1a0b59-ddc6-4b2d-b5f5-d92084c31e46
# ╟─81f16b8b-2f0b-4ba3-8c26-6669eabf48aa
# ╟─fb6c3a48-550a-4d2e-a00b-a1e40d86b535
# ╟─ab6fa4ac-29ed-4722-88ed-fa1caf2072f3
# ╟─8e72d934-e307-4505-ac82-c06734415df6
# ╟─e6ff86a9-9f54-474b-8111-a59a25eda506
# ╟─9c1d9607-a634-4350-aacd-2d40984d647d
# ╟─63db2fa2-50b2-4940-b8ee-0dc6e3966a57
# ╟─693167e7-e80c-401d-af89-55b5fae30848
# ╟─4cd70901-2142-4868-9a33-c46ca0d064ec
# ╟─89018a35-76f4-4f23-b15a-a600db046d6f
# ╟─1d219222-0778-4c37-9182-ed5ccbb3ef32
# ╟─4ff09f7c-aeac-48bd-9d58-8446137c3acd
# ╟─ea44037b-9359-4fbd-990f-529d88d54351
# ╟─e731a8e3-6462-4a60-83e9-6ab7ddfff50e
# ╟─685c2b28-b071-452c-a881-801128dcb6c3
# ╟─177ddfc2-2cbe-4dba-9d05-2857633dd1ae
# ╟─6c2a3a93-385f-4758-9b6e-4cb594a8e856
# ╟─fb8168c2-8489-418b-909b-cede57b5ae64
# ╟─fdb39284-dbb1-49fa-9a1c-f360f9e6b765
# ╟─60214f22-c8bb-4a32-a882-4e6c727b29a9
# ╟─7a6dbe09-cb7f-405f-b9b5-b350ca170e5f
# ╟─5dc4a849-76dd-4c4f-8828-755671839e5e
# ╟─b053f11b-9ed7-47ff-ab32-0c70b87e71ed
# ╟─7b1aa6dd-647f-44cb-b580-b58e23e8b5a6
# ╟─b96bac75-b4ad-45f7-aeec-cb6a387eebf0
# ╟─5fe022eb-6a17-466e-a6d0-d67e82af23cd
# ╟─92047e95-7eba-4021-9668-9bb4b92261d7
# ╟─e2ae1084-8759-4f27-8ad1-43a88e434a3d
# ╠═edd3aea8-abdb-4e12-9ef9-12ac0fff835b
# ╠═a2904efb-186c-449d-b1aa-caf530f88e91
# ╠═14faaf82-ad3e-4192-8d48-84adfa30442d
# ╠═5d141b88-ec07-4a02-8eb3-37405e5c9f5d
# ╠═0907e683-f216-4cf6-a210-ae5181fdc487
# ╠═805c7072-98fa-4086-a69d-2e126c55af36
# ╠═7e527024-c294-4c16-8626-9953588d9b6a
# ╠═0bbfa106-f465-4a7b-80a7-7732ba435822
# ╠═3e59c65a-ceed-42ed-be64-a6964db016e7
# ╟─29f85d05-99fd-4843-9be0-5663e681dad7
# ╠═9a46597c-b1ee-4e3b-aed1-fd2874b6e77a
# ╠═e7830e55-bd9e-4a8a-9239-4191a5f0b1d1
# ╟─de2cd247-ba68-4ba4-9784-27a743478635
# ╟─dc929c23-7434-4848-847a-9fa696e84776
# ╟─4f1df03f-c315-47b1-b181-749e1231594c
# ╠═ccd38f52-104d-434a-aea3-dd94e571374f
# ╠═7eccba6a-3ad5-440b-9c5d-392dc8dc7aba
# ╠═f4230251-ba54-434a-b86b-f972c7389217
# ╟─4a858a3e-ce28-4642-b061-3975a3ed99ff
# ╠═674bb3bb-637b-44f2-bf6d-d1678da03fbd
# ╠═5a59d96f-b2f1-4564-82c7-7f0fe181afb8
# ╠═55d2f8ee-4f77-4d44-b704-30643dbbab84
# ╠═14951168-97c2-43ae-8d5e-5506408a2bb2
# ╠═4f564581-6032-449c-8b15-3c741f44237a
# ╠═a36516e8-76c1-4bff-8a12-3e1e621b857d
# ╠═402b861c-d363-4d23-b9e9-eb088f57b5c4
# ╠═63975a80-1b41-4f55-91a1-4a316ad7bf26
# ╠═6f688f88-432a-42b2-a2db-19d6bb282e0a
# ╠═fb46db14-f7e0-4f01-9096-02334c62942d
# ╟─b2c3db3d-c250-4daa-8453-3c9a2734aede
# ╠═69dc2685-b70f-4a81-af30-f02e0054bd52
# ╠═9a986264-5ba7-4697-a00d-711f8efe29f0
# ╠═560cf3e9-0c14-4497-85b9-f07045eea32a
# ╠═8ab79efc-e8d0-4c6f-81df-a89008142bb7
# ╠═0eec318c-2c09-4dd6-9187-9c0273d29915
# ╠═1f0ef29c-0ad5-4d97-aeed-5ff44e86577a
# ╠═603d8fc2-5e7b-4d55-92b6-208b25ea6569
# ╟─2b3c765e-b505-4f07-9bcb-3c8cc47364ad
# ╠═e0f266da-7e65-4398-bfd4-a6c0b54e626b
# ╟─e1d35886-79d0-40a5-bd33-1c4e5f4a0a9a
# ╠═b63a30b0-c75b-4998-a2b2-0b79574cab81
# ╟─139bf020-c4a8-45c8-96fa-aeebc7ddaedc
# ╠═8967c0f0-89f8-4893-b11b-253333d1a823
# ╟─f2540450-5a07-4fb8-93fb-a6d48dd36a56
# ╠═3acb2cfd-fa29-4a2b-8f23-f5aaf474edd0
# ╟─aa1547f2-5edd-4b7e-b93e-bdfc4e4fc6d5
# ╟─6e76a107-4f51-4e32-b133-7b6e04d7d107
# ╟─999f7a8f-d72e-4ccd-8cbf-b5bbb7db1842
# ╟─32772c2a-6b80-4779-963c-06974ff0d832
# ╟─41642bd5-1321-490a-95ad-4c1d6363456f
# ╟─2a553e32-05ef-4c2d-aba7-41185c6035d4
# ╟─ab8345ce-e038-4d6b-9e1f-57e4f33bb67b
# ╟─bb9c9a4c-601a-4708-9b2d-04d1583938f2
# ╟─b9917e94-c33d-423f-a478-3252bacc2494
# ╟─4978f404-11ff-41b8-a673-f2d051b1f526
# ╟─73bd2e3b-902f-461b-860f-246257608ecd
# ╟─4dd47dc8-6dfa-47a4-a088-689b4b870762
# ╟─ecd975d2-9374-4f40-80ac-2cceda11e7fb
# ╟─832cc81d-a49d-46e7-9d2b-d8bde9bb1273
# ╟─2192a1de-1042-4b13-a313-b67de489124c
# ╟─01c709c7-806c-4389-bbb2-4081e64426d9
# ╟─b1e0cf83-4337-4044-a7d1-5fca8ae79268
# ╟─71f4b476-027d-4c8f-b561-1ee418bc9e61
# ╟─042013cf-9cd2-409d-827f-a311a2f8ce62
# ╟─82593cd0-1403-4597-8370-919c80494479
# ╟─f58720b5-2bcb-4950-b453-bd59f648c66a
# ╟─4576d791-6af7-4ba5-9b80-fe99c0bb2e88
# ╟─6e9d17f1-b17d-4e8d-82a3-921558a20c0f
# ╟─f18d89f5-1129-43e0-8b4a-5c1fcd618eab
# ╟─2912c7ed-75e3-4dfd-9c40-92115cc08194
# ╟─5d1517c0-562b-40db-bec2-32b5494de1b8
# ╟─ae096ad2-3ae9-4440-a959-0d7d9a174f1d
# ╟─8148bc1f-ef99-40a4-a5ce-0a42643f703d
# ╠═200f1848-0980-4185-919a-93ab2e7f788f
# ╠═bd86c5c2-16be-4cfd-ba7a-a0e2544d82d1
# ╟─11557d6b-3a1e-416d-874f-b8d217976f76
# ╟─48a10ea2-5d32-4a55-b8c0-f6a5e82eace9
# ╟─fafc1b0f-6469-4b6c-a00d-5272a45fc69b
# ╟─ad6cff7b-5cbf-4ab1-94f7-d21cbc171000
# ╠═30c191c5-642b-4062-98f3-643d314a054d
# ╠═fa5716f9-8bff-4295-812b-691ccdc12832
# ╠═f267e315-3c19-4345-8fba-641bb0ea515b
# ╠═4d373cf6-9b39-44bc-8f13-220933fc8f5c
# ╠═293a68ca-e02f-47b3-85ed-aeeb8995f3ec
# ╠═8324f365-fd12-4ca3-8ca6-657e5917f946
# ╠═70fb10ea-9229-46ef-8ba3-b1d3874b7929
# ╠═51504ba4-4711-48b7-aab9-d4f26c009659
# ╠═a07b93b1-742b-41d4-bd0f-bc899de55338
# ╠═864dbde7-b689-4165-a08e-6bbbd72190de
# ╠═5f207f59-b9f4-477f-b79f-0aee743bdb8e
# ╠═f88517d6-b87d-45ba-bf3f-67074fa51fca
# ╠═45aef837-9b2c-49b2-b815-e4d60f103f58
================================================
FILE: notebooks/basic.jl
================================================
### A Pluto.jl notebook ###
# v0.14.5
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
# Mock of Pluto's @bind macro for running this notebook outside Pluto:
# binds `def` (a global) to the element's default value via `Base.get`,
# or to `missing` when the element has no gettable default.
macro bind(def, element)
    quote
        local el = $(esc(element))
        # Use the element's default if it supports Base.get; otherwise `missing`.
        global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
        el  # return the element itself so the cell still displays it
    end
end
# ╔═╡ 1ef174fa-16f0-11eb-328a-afc201effd2f
using Pkg, Printf
# ╔═╡ 55cfdab8-d792-11ea-271f-e7383e19997c
using PlutoUI;
# ╔═╡ 9e509f80-d485-11ea-0044-c5b7e750aacb
using NiLang
# ╔═╡ 37ed073a-d492-11ea-156f-1fb155128d0f
using Zygote, BenchmarkTools
# ╔═╡ 4d75f302-d492-11ea-31b9-bbbdb43f344e
using NiLang.AD
# ╔═╡ 627ea2fb-6530-4ea0-98ee-66be3db54411
html"""
"""
# ╔═╡ 94b2b962-e02a-11ea-09a5-81b3226891ed
md"""# 连猩猩都能懂的可逆编程
### (Reversible programming made simple)
[https://github.com/JuliaReverse/NiLangTutorial/](https://github.com/JuliaReverse/NiLangTutorial/)
$(html"
for i=start:step:stop
# do something
end
for i=stop:-step:start
# undo something
end
if (precondition, postcondition)
# do A
else
# do B
end
if (postcondition, precondition)
# undo A
else
# undo B
end

"""
# ╔═╡ cc0d5622-d788-11ea-19cd-3bf6864d9263
md"""##### Including NiLang.AD
"""
# ╔═╡ a1646ef0-e091-11ea-00f1-e7c246e191ff
md"## 3. Solve the memory wall problem in machine learning"
# ╔═╡ b18b3ae8-e091-11ea-24a1-e968b70b217c
html"""
Learning a ring distribution with NICE network, before and after training
| $llabel | $rlabel |
| $(html(left)) | $(html(right)) |
| $(html(a)) | $(html(b)) |
| $(html(a)) |
| $(html(b)) |
Harvard university

"""
# ╔═╡ 7ee8cfc9-26b2-4fe4-8263-1f4d2f7c276d
md"Initially written by GiggleLiu and Taine Zhao (The author of MLStyle)"
# ╔═╡ 1669f5e3-efe1-4b79-a2b6-11ed7476a2a1
md"the program of finding the maximum number"
# ╔═╡ 5000f4c3-5416-4e53-88ae-e30d8d09827e
# Reversible running maximum: s₂, s₃, s₄ accumulate pairwise maxima of x₁..x₄,
# so on exit s₄ holds max(x₁, x₂, x₃, x₄). All intermediate results (s₂, s₃)
# remain allocated — the uncompute-free version this cell contrasts with v2.
# Fix: dropped the unused `where T` static parameter; it was never bound by any
# argument (the kind of definition `Test.detect_unbound_args` flags).
@i function i_find_maximum_v1(s₂, s₃, s₄, x₁, x₂, x₃, x₄)
    s₂ += max(x₁, x₂) # step 1
    s₃ += max(s₂, x₃) # step 2
    s₄ += max(s₃, x₄) # step 3
end
# ╔═╡ 2413c061-89de-403f-8011-e458f5a9859d
i_find_maximum_v1(0, 0, 0, 3, 2, 8, 1)
# ╔═╡ d4704779-9261-478b-bbf6-551220783e12
md"the basic building block of compute-copy-uncompute"
# ╔═╡ 4be065b5-0841-4d54-b9ab-d6770d4d9d94
# Compute-copy-uncompute version of the reversible maximum: the intermediate
# maxima s₂, s₃ are allocated on the working tape, used to produce s₄, then
# uncomputed back to zero so they can be deallocated — only s₄ keeps a result.
# Fix: dropped the unused `where T` static parameter; it was never bound by any
# argument (the kind of definition `Test.detect_unbound_args` flags).
@i function i_find_maximum_v2(s₄, x₁, x₂, x₃, x₄)
    # compute
    s₂ ← 0 # variable on the working tape
    s₃ ← 0
    s₂ += max(x₁, x₂) # step 1
    s₃ += max(s₂, x₃) # step 2
    # copy
    s₄ += max(s₃, x₄) # step 3
    # uncompute
    s₃ -= max(s₂, x₃) # step 4
    s₂ -= max(x₁, x₂) # step 5
    s₂ → 0
    s₃ → 0
end
# ╔═╡ e850e53d-cf61-4fc7-9cb3-e318ae957f0b
i_find_maximum_v2(0, 3, 2, 8, 1)
# ╔═╡ a267ea5f-8bd5-4ee0-9c8d-47e2d3b81692
TikzPicture(L"""
\def\r{0.15};
\foreach \x in {1,4}{
\fill[fill=black] (\x, 0) circle [radius=\r];
\node[white] at (\x, 0) {$s_{\x}$};
}
\foreach \x in {2,3}{
\draw (\x, 0) circle [radius=\r];
\node[black] at (\x, 0) {$s_{\x}$};
}
\fill[fill=white] (5.5, 0) circle [radius=\r];
\foreach \x in {1,...,3}{
\draw [black, thick, ->] (\x+\r, \r) .. controls (\x+0.5, 0.3) .. (\x+1-\r, \r);
\node at (\x+0.5, 0.4) {\x};
}
\foreach[evaluate={\y=int(6-\x)}] \x in {1,...,2}{
\draw [red, thick, <-] (\x+\r, -\r) .. controls (\x+0.5, -0.3) .. (\x+1-\r, -\r);
\node at (\x+0.5, -0.4) {\y};
}
"""
, options="scale=1.8", preamble="")
# ╔═╡ c02520a3-3375-4d83-a0dc-1aeac2aa7d5f
md"Recursively apply Bennett's time space tradeoff scheme"
# ╔═╡ 2e18fc92-4185-493b-9ce8-cca63dad7d2d
TikzPicture(L"""
\def\r{0.15};
\def\n{10};
\foreach \x in {1,4,7,10}{
\fill[fill=black] (\x, 0) circle [radius=\r];
\node[white] at (\x, 0) {$s_{\x}$};
}
\foreach \x in {2,3,5,6,8,9}{
\draw (\x, 0) circle [radius=\r];
\node[black] at (\x, 0) {$s_{\x}$};
}
\fill[fill=white] (\n+0.5, 0) circle [radius=\r];
\foreach \x/\t in {1/1,2/2,3/3,4/6,5/7,6/8,7/11,8/12,9/13}{
\draw [black, thick, ->] (\x+\r, \r) .. controls (\x+0.5, 0.3) .. (\x+1-\r, \r);
\node[black] at (\x+0.5, 0.4) {\t};
}
\foreach \x/\t in {1/5,2/4,4/10,5/9,7/15,8/14}{
\draw [black, thick, <-] (\x+\r, -\r) .. controls (\x+0.5, -0.3) .. (\x+1-\r, -\r);
\node[black] at (\x+0.5, -0.4) {\t};
}
"""
, options="scale=2.0", preamble="")
# ╔═╡ acc7b185-e4df-4aca-aa42-554215065384
TikzPicture(L"""
\def\r{0.15};
\def\n{10};
\foreach \x in {1,4,7,10}{
\fill[fill=black] (\x, 0) circle [radius=\r];
\node[white] at (\x, 0) {$s_{\x}$};
}
\fill[fill=white] (\n+0.5, 0) circle [radius=\r];
\foreach \x in {1,4,7}{
\draw [black, thick, ->] (\x+\r, \r) .. controls (\x+1.5, 0.6) .. (\x+3-\r, \r);
}
\foreach \x in {1,4}{
\draw [black, thick, <-] (\x+\r, -\r) .. controls (\x+1.5, -0.6) .. (\x+3-\r, -\r);
}
"""
, options="scale=2.0", preamble="")
# ╔═╡ 00c9e973-7e06-4483-bf4c-be7374707118
md"
* Space complexity: ``O(S \log T)``
* Time complexity: ``O(T^{1+\epsilon})``"
# ╔═╡ 83ff3fc3-bcd8-4235-a42f-1d75c7d6aa5b
md"## Computing architectures"
# ╔═╡ b308e270-6b40-4946-ac92-c705823f2c1e
let
txt1 = md"Traditional irreversible computer
$E \sim 10^8 kT$"
img1 = html"""
"""
txt2 = md"DNA copying is a living copy machine
$E \sim 100k T$"
img2 = html"""
"""
txt3 = md"""
Adiabatic CMOS [Athas, 1994]
$E \sim 10^6 kT$
"""
img3 = html"""
"""
txt4 = md"""Adiabatic superconducting devices [Takeuchi, 2014]
$E \sim kT$
"""
img4 = html"""
"""
updown(leftright(updown(img1, txt1), updown(img2, txt2)), leftright(updown(img3, txt3), updown(img4, txt4)))
end
# ╔═╡ e483b3d4-d01c-4a98-8e68-e8120a7d95a7
md"## More

*Youtube*: Michael P. Frank: Fundamental Physics of Reversible Computing — An Introduction, Part 1
"
# ╔═╡ 74017e78-0f02-41bb-a160-5f2d26c18268
md"""

Kenichi Morita, How can we construct reversible Turing machines in a very simple reversible cellular automaton? Video can be found in this conference page: [https://reversible-computation-2021.github.io/program/](https://reversible-computation-2021.github.io/program/)
"""
# ╔═╡ 0b3735c2-695c-4225-843e-16ca17aac0eb
md"""## Take home message
1. Irreversible computing -> information erasure -> dissipate heat (``kT\log 2`` per bit)
2. Reversible computing -> requires operations being adiabatic -> slow
2. Reversible programming suffers from **polynomial time overhead and logarithmic space overhead** when differentiating an irreversible linear program
3. Brownian computer
* mRNA copy
* Magnetic dipole
4. General reversible computer
* Billiard ball model
* Reversible cellular automata
* Adiabatic CMOS
6. **How to find this notebook?** In NiLang's Github repo, file: `notebooks/feynman.jl`
"""
# ╔═╡ d7942b37-f821-494a-8f18-5f267aa3457a
md"""
### References
* Reeb, David, and Michael M. Wolf. "An improved Landauer principle with finite-size corrections." (2014).
* Athas, William C., and L. J. Svensson. "Reversible logic issues in adiabatic CMOS." Proceedings Workshop on Physics and Computation. (1994).
* Takeuchi, N., Y. Yamanashi, and N. Yoshikawa. "Reversible logic gate using adiabatic superconducting devices." (2014)
* Griewank, Andreas. "Achieving logarithmic growth of temporal and spatial complexity in reverse automatic differentiation." Optimization Methods and software 1.1 (1992): 35-54.
* Ming Li, John Tromp, Paul Vitanyi. "Reversible Simulation of Irreversible Computation by Pebble Games" (1997)
"""
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
Compose = "a81c6b42-2e10-5240-aca2-a61377ecd94b"
NiLang = "ab4ef3a6-0b42-11ea-31f6-e34652774712"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
TikzPictures = "37f6aa50-8035-52d0-81c2-5a1d08754b2d"
Viznet = "52a3aca4-6234-47fd-b74a-806bdf78ede9"
[compat]
Compose = "~0.9.2"
NiLang = "~0.9.1"
Plots = "~1.18.0"
PlutoUI = "~0.7.9"
Revise = "~3.1.17"
TikzPictures = "~3.3.3"
Viznet = "~0.3.3"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.6+5"
[[Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "e2f47f6d8337369411569fd45ae5753ca10394c6"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.0+6"
[[CodeTracking]]
deps = ["InteractiveUtils", "UUIDs"]
git-tree-sha1 = "8ad457cfeb0bca98732c97958ef81000a543e73e"
uuid = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
version = "1.0.5"
[[ColorSchemes]]
deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random", "StaticArrays"]
git-tree-sha1 = "c8fd01e4b736013bc61b704871d20503b33ea402"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.12.1"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "32a2b8af383f11cbb65803883837a149d10dfe8a"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.10.12"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dc7dedc2c2aa9faf59a55c622760a25cbefbe941"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.31.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[Compose]]
deps = ["Base64", "Colors", "DataStructures", "Dates", "IterTools", "JSON", "LinearAlgebra", "Measures", "Printf", "Random", "Requires", "Statistics", "UUIDs"]
git-tree-sha1 = "c6461fc7c35a4bb8d00905df7adafcff1fe3a6bc"
uuid = "a81c6b42-2e10-5240-aca2-a61377ecd94b"
version = "0.9.2"
[[Contour]]
deps = ["StaticArrays"]
git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7"
uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
version = "0.5.7"
[[DataAPI]]
git-tree-sha1 = "ee400abb2298bd13bfc3df1c412ed228061a2385"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.7.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.9"
[[DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[Dierckx]]
deps = ["Dierckx_jll"]
git-tree-sha1 = "5fefbe52e9a6e55b8f87cb89352d469bd3a3a090"
uuid = "39dd38d3-220a-591b-8e3c-4c3a8c710a94"
version = "0.5.1"
[[Dierckx_jll]]
deps = ["CompilerSupportLibraries_jll", "Libdl", "Pkg"]
git-tree-sha1 = "a580560f526f6fc6973e8bad2b036514a4e3b013"
uuid = "cd4c43a9-7502-52ba-aa6d-59fb2a88580b"
version = "0.0.1+0"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[EarCut_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "92d8f9f208637e8d2d28c664051a00569c01493d"
uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5"
version = "2.1.5+1"
[[Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.2.10+0"
[[FFMPEG]]
deps = ["FFMPEG_jll"]
git-tree-sha1 = "b57e3acbe22f8484b4b5ff66a7499717fe1a9cc8"
uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a"
version = "0.4.1"
[[FFMPEG_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"]
git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f"
uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5"
version = "4.3.1+4"
[[FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Fontconfig_jll]]
deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f"
uuid = "a3f928ae-7b40-5064-980b-68af3947d34b"
version = "2.13.1+14"
[[Formatting]]
deps = ["Printf"]
git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8"
uuid = "59287772-0a20-5a39-b81b-1366585eb4c0"
version = "0.4.2"
[[FreeType2_jll]]
deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6"
uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7"
version = "2.10.1+5"
[[FriBidi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91"
uuid = "559328eb-81f9-559d-9380-de523a88c83c"
version = "1.0.10+0"
[[GLFW_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"]
git-tree-sha1 = "dba1e8614e98949abfa60480b13653813d8f0157"
uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89"
version = "3.3.5+0"
[[GR]]
deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"]
git-tree-sha1 = "b83e3125048a9c3158cbb7ca423790c7b1b57bea"
uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71"
version = "0.57.5"
[[GR_jll]]
deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "e14907859a1d3aee73a019e7b3c98e9e7b8b5b3e"
uuid = "d2c73de3-f751-5644-a686-071e5b155ba9"
version = "0.57.3+0"
[[GeometryBasics]]
deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"]
git-tree-sha1 = "15ff9a14b9e1218958d3530cc288cf31465d9ae2"
uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326"
version = "0.3.13"
[[Gettext_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046"
uuid = "78b55507-aeef-58d4-861c-77aaff3498b1"
version = "0.21.0+0"
[[Glib_jll]]
deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "47ce50b742921377301e15005c96e979574e130b"
uuid = "7746bdde-850d-59dc-9ae8-88ece973131d"
version = "2.68.1+0"
[[Grisu]]
git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2"
uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe"
version = "1.0.2"
[[HTTP]]
deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"]
git-tree-sha1 = "c6a1fff2fd4b1da29d3dccaffb1e1001244d844e"
uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
version = "0.9.12"
[[IniFile]]
deps = ["Test"]
git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8"
uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f"
version = "0.5.0"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[IterTools]]
git-tree-sha1 = "05110a2ab1fc5f932622ffea2a003221f4782c18"
uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
version = "1.3.0"
[[IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.1"
[[JpegTurbo_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d735490ac75c5cb9f1b00d8b5509c11984dc6943"
uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8"
version = "2.1.0+0"
[[JuliaInterpreter]]
deps = ["CodeTracking", "InteractiveUtils", "Random", "UUIDs"]
git-tree-sha1 = "31c2eee64c1eee6e8e3f30d5a03d4b5b7086ab29"
uuid = "aa1ae85d-cabe-5617-a682-6adf51b2e16a"
version = "0.8.18"
[[LAME_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "f6250b16881adf048549549fba48b1161acdac8c"
uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d"
version = "3.100.1+0"
[[LZO_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6"
uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac"
version = "2.10.1+0"
[[LaTeXStrings]]
git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.2.1"
[[Latexify]]
deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"]
git-tree-sha1 = "a4b12a1bd2ebade87891ab7e36fdbce582301a92"
uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316"
version = "0.15.6"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[LibVPX_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "12ee7e23fa4d18361e7c2cde8f8337d4c3101bc7"
uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a"
version = "1.10.0+0"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[Libffi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "761a393aeccd6aa92ec3515e428c26bf99575b3b"
uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490"
version = "3.2.2+0"
[[Libgcrypt_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"]
git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae"
uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4"
version = "1.8.7+0"
[[Libglvnd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"]
git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf"
uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29"
version = "1.3.0+3"
[[Libgpg_error_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9"
uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8"
version = "1.42.0+0"
[[Libiconv_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.16.1+1"
[[Libmount_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73"
uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9"
version = "2.35.0+0"
[[Libtiff_jll]]
deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"]
git-tree-sha1 = "340e257aada13f95f98ee352d316c3bed37c8ab9"
uuid = "89763e89-9b03-5906-acba-b20f662cd828"
version = "4.3.0+0"
[[Libuuid_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066"
uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700"
version = "2.36.0+0"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LittleCMS_jll]]
deps = ["JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pkg"]
git-tree-sha1 = "e6ea89d915cdad8d264f7f9158c6664f879edcde"
uuid = "d3a379c0-f9a3-5b72-a4c0-6bf4d2e8af0f"
version = "2.9.0+0"
[[LogarithmicNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "d88b70111754e3660f80d3596a343ce42bf5ee84"
uuid = "aa2f6b4e-9042-5d33-9679-40d3a6b85899"
version = "0.4.2"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[LoweredCodeUtils]]
deps = ["JuliaInterpreter"]
git-tree-sha1 = "4bfb8b57df913f3b28a6bd3bdbebe9a50538e689"
uuid = "6f1432cf-f94c-5a45-995e-cdbf5db27b0b"
version = "2.1.0"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.6"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MatchCore]]
git-tree-sha1 = "90af9fe333f8c9851f952dfa7f335185c94567c0"
uuid = "5dd3f0b1-72a9-48ad-ae6e-79f673da005f"
version = "0.1.1"
[[MbedTLS]]
deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"]
git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe"
uuid = "739be429-bea8-5141-9913-cc70e7f3736d"
version = "1.0.3"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Measures]]
git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f"
uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e"
version = "0.3.1"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "4ea90bd5d3985ae1f9a908bd4500ae88921c5ce7"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.0"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[NiLang]]
deps = ["FixedPointNumbers", "LinearAlgebra", "LogarithmicNumbers", "MatchCore", "NiLangCore", "Reexport", "SparseArrays", "TupleTools"]
git-tree-sha1 = "3fe439482d8c08a15f929ae7278a6c7f737672d5"
uuid = "ab4ef3a6-0b42-11ea-31f6-e34652774712"
version = "0.9.1"
[[NiLangCore]]
deps = ["MatchCore", "TupleTools"]
git-tree-sha1 = "239f97ea947531cfe7a596746e31c8429c7169b9"
uuid = "575d3204-02a4-11ea-3f62-238caa8bf11e"
version = "0.10.3"
[[Ogg_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7937eda4681660b4d6aeeecc2f7e1c81c8ee4e2f"
uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051"
version = "1.3.5+0"
[[OpenJpeg_jll]]
deps = ["Libdl", "Libtiff_jll", "LittleCMS_jll", "Pkg", "libpng_jll"]
git-tree-sha1 = "e330ffff1c6a593fa44cc40c29900bee82026406"
uuid = "643b3616-a352-519d-856d-80112ee9badc"
version = "2.3.1+0"
[[OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "15003dcb7d8db3c6c857fda14891a539a8f2705a"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "1.1.10+0"
[[Opus_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "51a08fb14ec28da2ec7a927c4337e4332c2a4720"
uuid = "91d4177d-7536-5919-b921-800302f37372"
version = "1.3.2+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[PCRE_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488"
uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc"
version = "8.44.0+0"
[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "1.1.0"
[[Pixman_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29"
uuid = "30392449-352a-5448-841d-b1acce4e97dc"
version = "0.40.1+0"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlotThemes]]
deps = ["PlotUtils", "Requires", "Statistics"]
git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d"
uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a"
version = "2.0.1"
[[PlotUtils]]
deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"]
git-tree-sha1 = "501c20a63a34ac1d015d5304da0e645f42d91c9f"
uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043"
version = "1.0.11"
[[Plots]]
deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"]
git-tree-sha1 = "9f126950870ef24ce75cdd841f4b7cf34affc6d2"
uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
version = "1.18.0"
[[PlutoUI]]
deps = ["Base64", "Dates", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "Suppressor"]
git-tree-sha1 = "44e225d5837e2a2345e69a1d1e01ac2443ff9fcb"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.9"
[[Poppler_jll]]
deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "OpenJpeg_jll", "Pkg", "libpng_jll"]
git-tree-sha1 = "e11443687ac151ac6ef6699eb75f964bed8e1faa"
uuid = "9c32591e-4766-534b-9725-b71a8799265b"
version = "0.87.0+2"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Qt5Base_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"]
git-tree-sha1 = "ad368663a5e20dbb8d6dc2fddeefe4dae0781ae8"
uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1"
version = "5.15.3+0"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[RecipesBase]]
git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.1.1"
[[RecipesPipeline]]
deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"]
git-tree-sha1 = "2a7a2469ed5d94a98dea0e85c46fa653d76be0cd"
uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c"
version = "0.3.4"
[[Reexport]]
git-tree-sha1 = "5f6c21241f0f655da3952fd60aa18477cf96c220"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.1.0"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[Revise]]
deps = ["CodeTracking", "Distributed", "FileWatching", "JuliaInterpreter", "LibGit2", "LoweredCodeUtils", "OrderedCollections", "Pkg", "REPL", "Requires", "UUIDs", "Unicode"]
git-tree-sha1 = "410bbe13d9a7816e862ed72ac119bda7fb988c08"
uuid = "295af30f-e4ad-537b-8983-00126c2a3abe"
version = "3.1.17"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Scratch]]
deps = ["Dates"]
git-tree-sha1 = "0b4b7f1393cff97c33891da2a0bf69c6ed241fda"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.1.0"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Showoff]]
deps = ["Dates", "Grisu"]
git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de"
uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f"
version = "1.0.3"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "2ec1962eba973f383239da22e75218565c390a96"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.0"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "896d55218776ab8f23fb7b222a5a4a946d4aafc2"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.5"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "2f6792d523d7448bbe2fec99eca9218f06cc746d"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.8"
[[StructArrays]]
deps = ["Adapt", "DataAPI", "StaticArrays", "Tables"]
git-tree-sha1 = "000e168f5cc9aded17b6999a560b7c11dda69095"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.0"
[[Suppressor]]
git-tree-sha1 = "a819d77f31f83e5792a76081eee1ea6342ab8787"
uuid = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
version = "0.2.0"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"]
git-tree-sha1 = "8ed4a3ea724dac32670b062be3ef1c1de6773ae8"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.4.4"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TikzPictures]]
deps = ["LaTeXStrings", "Poppler_jll", "Requires"]
git-tree-sha1 = "06b36e2baa9b97814ef1993207b71e2e23e9efb5"
uuid = "37f6aa50-8035-52d0-81c2-5a1d08754b2d"
version = "3.3.3"
[[TupleTools]]
git-tree-sha1 = "3c712976c47707ff893cf6ba4354aa14db1d8938"
uuid = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
version = "1.3.0"
[[URIs]]
git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.3.0"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[Viznet]]
deps = ["Compose", "Dierckx"]
git-tree-sha1 = "7a022ae6ac8b153d47617ed8c196ce60645689f1"
uuid = "52a3aca4-6234-47fd-b74a-806bdf78ede9"
version = "0.3.3"
[[Wayland_jll]]
deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "3e61f0b86f90dacb0bc0e73a0c5a83f6a8636e23"
uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89"
version = "1.19.0+0"
[[Wayland_protocols_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll"]
git-tree-sha1 = "2839f1c1296940218e35df0bbb220f2a79686670"
uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91"
version = "1.18.0+4"
[[XML2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a"
uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a"
version = "2.9.12+0"
[[XSLT_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"]
git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a"
uuid = "aed1982a-8fda-507f-9586-7b0439959a61"
version = "1.1.34+0"
[[Xorg_libX11_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"]
git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527"
uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc"
version = "1.6.9+4"
[[Xorg_libXau_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e"
uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec"
version = "1.0.9+4"
[[Xorg_libXcursor_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"]
git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd"
uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724"
version = "1.2.0+4"
[[Xorg_libXdmcp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4"
uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05"
version = "1.1.3+4"
[[Xorg_libXext_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3"
uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3"
version = "1.3.4+4"
[[Xorg_libXfixes_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4"
uuid = "d091e8ba-531a-589c-9de9-94069b037ed8"
version = "5.0.3+4"
[[Xorg_libXi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"]
git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246"
uuid = "a51aa0fd-4e3c-5386-b890-e753decda492"
version = "1.7.10+4"
[[Xorg_libXinerama_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"]
git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123"
uuid = "d1454406-59df-5ea1-beac-c340f2130bc3"
version = "1.1.4+4"
[[Xorg_libXrandr_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"]
git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631"
uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484"
version = "1.5.2+4"
[[Xorg_libXrender_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96"
uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa"
version = "0.9.10+4"
[[Xorg_libpthread_stubs_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb"
uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74"
version = "0.1.0+3"
[[Xorg_libxcb_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"]
git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6"
uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b"
version = "1.13.0+3"
[[Xorg_libxkbfile_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2"
uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a"
version = "1.1.0+4"
[[Xorg_xcb_util_image_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97"
uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b"
version = "0.4.0+1"
[[Xorg_xcb_util_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"]
git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1"
uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5"
version = "0.4.0+1"
[[Xorg_xcb_util_keysyms_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00"
uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7"
version = "0.4.0+1"
[[Xorg_xcb_util_renderutil_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e"
uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e"
version = "0.3.9+1"
[[Xorg_xcb_util_wm_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67"
uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361"
version = "0.4.1+1"
[[Xorg_xkbcomp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"]
git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b"
uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4"
version = "1.4.2+4"
[[Xorg_xkeyboard_config_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"]
git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d"
uuid = "33bec58e-1273-512f-9401-5d533626f822"
version = "2.27.0+4"
[[Xorg_xtrans_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845"
uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10"
version = "1.4.0+3"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zstd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6"
uuid = "3161d3a3-bdf6-5164-811a-617609db77b4"
version = "1.5.0+0"
[[libass_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c"
uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0"
version = "0.14.0+4"
[[libfdk_aac_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab"
uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280"
version = "0.1.6+4"
[[libpng_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c"
uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f"
version = "1.6.38+0"
[[libvorbis_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"]
git-tree-sha1 = "c45f4e40e7aafe9d086379e5578947ec8b95a8fb"
uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a"
version = "1.3.7+0"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
[[x264_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d713c1ce4deac133e3334ee12f4adff07f81778f"
uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a"
version = "2020.7.14+2"
[[x265_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab"
uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76"
version = "3.0.0+3"
[[xkbcommon_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"]
git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6"
uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd"
version = "0.9.1+5"
"""
# ╔═╡ Cell order:
# ╟─e20e2d2e-4b28-4e32-8d80-ce029928a094
# ╟─f3e235e7-76b9-4c39-bc70-038539838ff4
# ╟─8308df59-3faa-4abf-8f05-119bbae48f64
# ╟─a3532a83-9fd3-4d24-b1bb-b52457317e51
# ╟─15657e4b-848e-43ad-a99f-37143d11705e
# ╟─f9675365-36aa-430c-b747-3bc4f602e6fb
# ╟─94c5eaa1-432c-4553-829e-f78d97f3c0ca
# ╟─bef6978d-e654-4364-b5eb-e9608cf68464
# ╟─046f7559-4af9-4982-b5c3-335add0911d7
# ╟─0a039bfa-571e-4fad-b73c-1324d08777fc
# ╟─bf7abacc-5b0a-4623-b2c5-af60183ad4b0
# ╟─3f1e4d7a-32a7-4c7e-92dd-465bac925e63
# ╟─95a21058-0b07-4859-af68-8ca5b48b2a77
# ╟─f68bcfb6-97ce-48d1-b0b8-e8466d4ac879
# ╟─2fe7c298-4c5d-464c-980b-6cd9a537ac1e
# ╟─49dab78a-7bd9-4faa-8a30-9af8a96e0c5b
# ╟─3d4ba750-8d62-48ac-bf96-691397689ddc
# ╟─7aa7b0ee-beeb-4a3e-abf1-aa71e916f4cd
# ╟─f4cb9212-181f-4338-b858-1d99c7f415e9
# ╟─c1bbaec8-4fb9-4ab8-a30d-06a286597de0
# ╟─6d7a07ff-be1b-4902-8a6d-7d9257c1157f
# ╟─0577d67f-648f-407c-8abf-507d086445bd
# ╟─eb10e436-bcce-4d81-891e-15158219fe80
# ╟─7c5a30fd-95f9-4bb8-b34f-b10b0f2a27f2
# ╟─abee1bee-ed01-4b05-a848-3aeb695a24ba
# ╟─b05538cc-de01-4b1e-a602-feb780cddf4a
# ╟─42c398ab-bb45-423f-b030-404e7582df5a
# ╟─48081dd4-2bf4-43a1-899c-0303b4fcedd3
# ╟─c6ef8479-639b-45c1-9b48-a5d2c233d3b8
# ╟─6f01cdc2-6ce9-41da-b279-b047c9779405
# ╟─876ad6cf-84c1-4e34-89de-6f9273ba3479
# ╟─9ca8912d-5fc5-4066-adb8-ad02f75c2cbe
# ╟─9aab5751-e9e0-46c0-8e66-4b98258fed08
# ╟─a29af398-ff22-44cb-a5aa-0b0409312be9
# ╟─c3db622f-e9ff-4d99-afb6-9db65c6cae7a
# ╟─12cbf4b7-9b55-423c-bf59-5cb18e167afd
# ╟─89a5ff44-1b04-4bd8-a40a-83382a027fb3
# ╟─51e7b853-8640-4415-a9a4-8c0e06ad916a
# ╟─a8fe838e-727d-4068-887d-17b1bf99f90b
# ╟─c7cd75cb-4c64-4704-b839-c5a556f89be7
# ╟─ec14fba6-0cb9-483f-b3ea-cc4c5e83c965
# ╟─f1abc5c1-2c34-422a-86c4-5ad8e7df8b7e
# ╟─8ad4e7c0-c496-4d29-ac09-e6525b1b4c0f
# ╟─757e2d78-c5ee-4b40-bfd6-1b39af338d9d
# ╟─cbea35c7-c3c8-48e7-bb47-d5e193aee2c4
# ╟─81013954-1c48-4c05-82c9-49b4bfafda95
# ╟─1924eff7-1423-4e90-8005-43113d9deb3d
# ╟─c7edbc15-cd59-45fd-a0dc-c48aadb1c096
# ╟─bf2c9da7-8c45-409f-82a3-979cd63ea993
# ╟─1c32a491-ac85-4132-82fd-9b846a8485df
# ╟─6cbf202f-34e4-42b6-a7a7-5d766bfdfc37
# ╟─d40b318f-bff2-4d0b-b2a6-d00933ac7567
# ╟─54f53a7b-74e8-433b-94fd-9fa7192dfca5
# ╟─b0dcad96-e439-4e09-9e92-8cad7ede79af
# ╟─0a15a2cf-2e7a-4bd7-ac78-0803fc3d5c73
# ╟─ffbc5616-d2d9-4ce4-996f-d1a743bb89b3
# ╟─2602d857-4a21-478a-97a2-58a177666f52
# ╟─cf27e340-578a-440d-8d4a-e5a2277d5205
# ╟─aa53fd68-5acd-488d-a096-5ce39759f481
# ╟─cb9a9ef0-c0dc-487c-8008-0f73f9910ef8
# ╟─f7e0478d-1839-4684-9265-ee990fe9da45
# ╟─751e32d6-2582-4b1d-9558-124b1ef54f81
# ╟─5f18987d-a69e-4db9-96d3-426ed298d9b8
# ╟─66495c77-3bbc-4731-b9e1-db11bbc24283
# ╟─84b867a3-804e-4e7e-a56c-0ffc1f4e6683
# ╟─4642d311-ef0b-4c29-901d-b5398a3ca7b6
# ╟─96c3d50b-8a79-4de0-b7e0-c63c3b769b74
# ╟─066aa825-81e4-404d-bf5a-6a9431969702
# ╟─6e99ed64-a896-450e-8bab-845e0fe971ae
# ╟─7f803113-653a-4dfd-93f0-83babb253b32
# ╟─7eb29d49-05f5-47e9-b4f5-4f31c5cd37ce
# ╟─aefdec07-dcef-4e00-bcb0-4747250cdd9b
# ╟─cbe4abaf-46f9-4726-97ae-cf3c378abaaf
# ╟─4f0c81f5-ce5f-4f73-a528-9feff4a7fc14
# ╠═e81de385-0070-49a9-a889-8fcf9d9e2951
# ╟─8249b820-8fb1-45d4-a95c-9c81e62e8216
# ╟─6503b377-b2d5-48be-90a4-97947afb4e5f
# ╟─53a571dd-cac7-432a-869c-b93a8fe05e17
# ╟─2e5c7f59-dd35-4846-815a-b92eabeee089
# ╟─7b326477-43b6-4a6e-8862-12e8b70e1ad9
# ╟─47cd7560-a29e-4b55-bef5-28daa1cdb834
# ╟─59ab4431-ea4d-4707-9a42-d50eafa40b56
# ╟─3630b412-beeb-455a-a4b8-1e1d50860266
# ╟─ba6347d6-4ad0-403b-824f-dcf290a7c002
# ╟─a2f4975d-eeee-4a2d-97dd-dd0cfd29d665
# ╟─32d411e9-b01d-4ad2-b4aa-2f091034e6c0
# ╟─e5b83421-dd94-43ad-84eb-ca558bff6a2d
# ╟─118642ad-1aad-4f91-8da0-55a417b67750
# ╟─45171ecc-9d34-4ab6-a00b-ec9c9afc33f8
# ╟─6cd60f7d-d7ce-4189-a2dd-e47ce6825741
# ╟─ff3fc929-f448-41be-8f60-65de33dff36a
# ╟─5ec2649e-9988-4f38-896a-64ef6ed91d82
# ╟─aa8475c3-c68b-4200-8634-ace33f525417
# ╟─aa22f905-b69d-405e-b09a-a765d60f6079
# ╟─2dcbaac0-2fad-4292-ad31-8188a60876da
# ╟─6bad4f5f-806f-480a-ae16-2582761ce5e3
# ╟─e200dde3-9033-45b5-bfe0-2d03753b2c11
# ╟─d7a4b342-ef0a-44f9-b88d-bbb04483e8b3
# ╟─908f19a2-6d32-4776-95eb-b249a8155ddc
# ╟─05fc1fae-b378-4c39-b060-74ca635745ec
# ╟─d0573bf9-0fd6-4512-bc13-17aa23a3265b
# ╟─d8998d5f-65b2-4850-aef9-f19ecc192eca
# ╟─6b4c180c-9a12-4e3d-9336-1431e7c5875a
# ╟─0eb66cc9-93c0-4f07-b31b-a9bf9000260e
# ╟─85bf9f92-30f1-4e05-8d07-d8e481f20ccb
# ╟─fbba0a91-9f48-4d91-90e7-f6a7df3227f9
# ╟─7fc81b9f-73ed-4780-9204-ddf39467e58f
# ╟─d08ae188-937f-474b-92d7-cb8eeda063fe
# ╟─7ee8cfc9-26b2-4fe4-8263-1f4d2f7c276d
# ╠═db9a97b1-f76d-4f51-96c6-0159469c5adb
# ╟─1669f5e3-efe1-4b79-a2b6-11ed7476a2a1
# ╠═5000f4c3-5416-4e53-88ae-e30d8d09827e
# ╠═2413c061-89de-403f-8011-e458f5a9859d
# ╟─d4704779-9261-478b-bbf6-551220783e12
# ╠═4be065b5-0841-4d54-b9ab-d6770d4d9d94
# ╠═e850e53d-cf61-4fc7-9cb3-e318ae957f0b
# ╟─a267ea5f-8bd5-4ee0-9c8d-47e2d3b81692
# ╟─c02520a3-3375-4d83-a0dc-1aeac2aa7d5f
# ╟─2e18fc92-4185-493b-9ce8-cca63dad7d2d
# ╟─acc7b185-e4df-4aca-aa42-554215065384
# ╟─00c9e973-7e06-4483-bf4c-be7374707118
# ╟─83ff3fc3-bcd8-4235-a42f-1d75c7d6aa5b
# ╟─b308e270-6b40-4946-ac92-c705823f2c1e
# ╟─e483b3d4-d01c-4a98-8e68-e8120a7d95a7
# ╟─74017e78-0f02-41bb-a160-5f2d26c18268
# ╟─0b3735c2-695c-4225-843e-16ca17aac0eb
# ╟─d7942b37-f821-494a-8f18-5f267aa3457a
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
================================================
FILE: notebooks/margolus.jl
================================================
### A Pluto.jl notebook ###
# v0.12.21
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
# Mock version of Pluto's `@bind` for running this notebook outside Pluto:
# with no front-end to supply a value, the bound variable falls back to the
# element's default (via `Base.get`) when one is defined, or `missing`.
macro bind(def, element)
quote
# evaluate the UI element once, in the caller's scope
local el = $(esc(element))
# use the element's default value if its type supports `Base.get`, else `missing`
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
el
end
end
# ╔═╡ 845b7d0a-7ca2-11eb-2683-cd370811bf68
using NiLang
# ╔═╡ 88912bea-7ca2-11eb-1cde-db111d594c20
using Viznet, PlutoUI, Compose
# ╔═╡ f42405a8-7ca2-11eb-133b-df4f1ce9bb19
html""""""
# ╔═╡ 66e972c2-7ca2-11eb-0a7a-a9305dd20511
md"# BBMCA - NiLang implementation"
# ╔═╡ c8722cea-7ca5-11eb-3e7d-0f39322ea40a
md"Check [Physics-like models of computation](https://www.sciencedirect.com/science/article/abs/pii/0167278984902525) (Norman Margolus, 1984) for theories about Billiard ball cellular automata (BBMCA)."
# ╔═╡ 845d1458-7ca2-11eb-0e69-11a10b9894a8
# Reversibly transfer the 2×2 Margolus block of `config` with top-left corner
# (i, j) into the integer `x`, one bit per cell, while zero-clearing those
# cells. Each cell is XOR-ed into `x` at bit positions 0..3, then `x`'s bit is
# XOR-ed back into the cell to cancel it. Boundaries wrap periodically via
# `mod1`. As called from `update_bbmca!`, `x` enters as 0 (so `⊻=` acts as a
# load) and cells hold 0/1.
@i function load_and_clear!(x, config, i, j)
m, n ← size(config)
# bit 0: cell (i, j)
x ⊻= config[i,j] # to make it faster, should put `@inbounds` before it
config[i,j] ⊻= x
# bit 1: right neighbour (i, j+1), wrapped
x ⊻= config[i,mod1(j+1, n)] << 1
config[i,mod1(j+1, n)] ⊻= x >> 1
# bit 2: lower neighbour (i+1, j), wrapped
x ⊻= config[mod1(i+1, m),j] << 2
config[mod1(i+1, m),j] ⊻= x >> 2
# bit 3: diagonal neighbour (i+1, j+1), wrapped
x ⊻= config[mod1(i+1, m),mod1(j+1, n)] << 3
config[mod1(i+1, m),mod1(j+1, n)] ⊻= x >> 3
end
# ╔═╡ 84680136-7ca2-11eb-1df5-ffa0b4b9d126
# Reversible collision rule for one 2×2 Margolus block, encoded as a 4-bit
# integer (bit layout as produced by `load_and_clear!`). The head-on two-ball
# patterns 6 (0b0110) and 9 (0b1001) swap into each other; a single ball
# (1, 2, 4, 8) moves to the diagonally opposite cell; every other pattern is
# passed through unchanged. When `y` enters as 0, `⊻=` acts as a store.
# The mapping is an involution — applying it twice restores the input — a
# fact `update_bbmca!` exploits to uncompute its ancilla.
@i function margolus_rule(y, x)
# remove reversibility check to make it run faster
@invcheckoff if x==6
y ⊻= 9
elseif x==9
y ⊻= 6
elseif x==4
y ⊻= 2
elseif x==2
y ⊻= 4
elseif x==1
y ⊻= 8
elseif x==8
y ⊻= 1
else
y ⊻= x
end
end
# ╔═╡ 845c1292-7ca2-11eb-3e56-996aa5229b4e
# Advance the billiard-ball cellular automaton by one Margolus step: partition
# the grid into 2×2 blocks whose origin row/column is `offset` (1 when
# `iseven`, 2 otherwise, so consecutive steps use shifted partitions) and
# update each block independently with `margolus_rule`. The whole function is
# reversible — `~update_bbmca!` steps the automaton backwards.
@i function update_bbmca!(config, iseven)
# computing offsets, and borrow some ancillas from system
@routine begin
offset ← 1
m, n ← size(config)
if !iseven
offset += 1
end
end
for j=offset:2:n
for i=offset:2:m
x ← 0
y ← 0
# load block to `x` and clean up original data
load_and_clear!(x, config, i, j)
# compute new config to `y`
margolus_rule(y, x)
# clean up `x` with the following observation:
# applying margolus rule twice restores the configuration
margolus_rule(x, y)
# store `y` to block
(~load_and_clear!)(y, config, i, j)
# ancillas `x` and `y` are returned to the pool automatically
end
end
# uncompute `offset`
~@routine
end
# ╔═╡ 34006d60-7ca3-11eb-3c51-c9758394b838
md"# Visualization"
# ╔═╡ 846e21da-7ca2-11eb-05a3-51e22ed04147
# Draw a 0/1 configuration matrix on a square lattice: cells equal to 1 are
# rendered as black squares, all others as white squares, each with a grey
# outline. Returns the composed canvas.
function showconfig(ba::AbstractMatrix)
nrow, ncol = size(ba, 1), size(ba, 2)
lt = Viznet.SquareLattice(ncol, nrow)
# helper: one square brush of the given fill colour, sized to the lattice unit
cellbrush(color) = nodestyle(:square, fill(color), stroke("#888888"), linewidth(unit(lt)*mm); r=unit(lt)/2.2)
occupied = cellbrush("black")
vacant = cellbrush("white")
canvas() do
for r = 1:nrow
for c = 1:ncol
brush = ba[r, c] == 1 ? occupied : vacant
# lattice coordinates are (column, row)
brush >> lt[c, r]
end
end
end
end
# ╔═╡ 846e8170-7ca2-11eb-351e-eb45c629f6a6
@bind btn Clock(0.1)
# ╔═╡ 84730f04-7ca2-11eb-3b13-3fd82a9e2109
# initial configuration: a 10×10 empty grid with a single ball in the
# top-left cell
config = let
grid = zeros(Int, 10, 10)
grid[1, 1] = 1
grid
end;
# ╔═╡ 84763e9c-7ca2-11eb-356c-c391966cdc98
# parity - Note: BBMCA is a two state CA
# A `Ref` holding the current block-partition parity; it is flipped after
# every update so consecutive steps use alternating Margolus partitions.
bbmca_parity = Ref(true)
# ╔═╡ 847711e6-7ca2-11eb-326a-15f6bfc05347
# One animation frame: advances the CA by one step, flips the partition
# parity, and draws the new state.
let
btn # referencing the clock value makes Pluto re-run this cell on every tick
# update
update_bbmca!(config, bbmca_parity[])
# change parity
bbmca_parity[] = !(bbmca_parity[])
# visualize
Compose.set_default_graphic_size(10cm, 10cm)
showconfig(config)
end
# ╔═╡ Cell order:
# ╟─f42405a8-7ca2-11eb-133b-df4f1ce9bb19
# ╟─66e972c2-7ca2-11eb-0a7a-a9305dd20511
# ╟─c8722cea-7ca5-11eb-3e7d-0f39322ea40a
# ╠═845b7d0a-7ca2-11eb-2683-cd370811bf68
# ╠═845c1292-7ca2-11eb-3e56-996aa5229b4e
# ╠═845d1458-7ca2-11eb-0e69-11a10b9894a8
# ╠═84680136-7ca2-11eb-1df5-ffa0b4b9d126
# ╟─34006d60-7ca3-11eb-3c51-c9758394b838
# ╠═88912bea-7ca2-11eb-1cde-db111d594c20
# ╠═846e21da-7ca2-11eb-05a3-51e22ed04147
# ╟─846e8170-7ca2-11eb-351e-eb45c629f6a6
# ╠═84730f04-7ca2-11eb-3b13-3fd82a9e2109
# ╠═84763e9c-7ca2-11eb-356c-c391966cdc98
# ╠═847711e6-7ca2-11eb-326a-15f6bfc05347
================================================
FILE: notebooks/reversibleprog.jl
================================================
### A Pluto.jl notebook ###
# v0.15.0
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
# Mock version of Pluto's `@bind` for running this notebook outside Pluto:
# with no front-end to supply a value, the bound variable falls back to the
# element's default (via `Base.get`) when one is defined, or `missing`.
macro bind(def, element)
quote
# evaluate the UI element once, in the caller's scope
local el = $(esc(element))
# use the element's default value if its type supports `Base.get`, else `missing`
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
el
end
end
# ╔═╡ f3e235e7-76b9-4c39-bc70-038539838ff4
begin
using Revise, Viznet, Compose, PlutoUI, Random, TikzPictures
function leftright(a, b; width=600, leftcellwidth=0.5)
HTML("""
| $(html(a)) | $(html(b)) |
| $(html(a)) |
| $(html(b)) |
Harvard university
"""
img2 = html"""
"""
leftright(updown(img2, html"
""", md"Feynman, Richard P.
**Feynman Lectures on Computation**
(2018)")
# ╔═╡ 046f7559-4af9-4982-b5c3-335add0911d7
html"""


"""
txt2 = md"DNA copying is a living copy machine
$E \sim 100k T$"
img2 = html"""
"""
txt3 = md"""
Adiabatic CMOS [Athas, 1994]
$E \sim 10^6 kT$
"""
img3 = html"""
"""
txt4 = md"""Adiabatic superconducting devices [Takeuchi, 2014]
$E \sim kT$
"""
img4 = html"""
"""
updown(leftright(updown(img1, txt1), updown(img2, txt2)), leftright(updown(img3, txt3), updown(img4, txt4)))
end
# ╔═╡ e483b3d4-d01c-4a98-8e68-e8120a7d95a7
md"# Summary
* An isolated system is reversible,
* Our programs are not reversible,
* Need a heatbath
* Dissipate heat to heat bath: ``kT \log 2``/bit (Landauer's principle),

*Youtube*: Michael P. Frank: Fundamental Physics of Reversible Computing — An Introduction, Part 1
*Loophole*: need to take algorithmic overheads into consideration!
"
# ╔═╡ 20c34526-c7c4-11eb-21fa-d706fd684a4c
md"# A short introduction to reversible programming"
# ╔═╡ 3f96abdf-fb5f-4d79-a288-e20b8c1f55d1
html"""
"""
# ╔═╡ 5d51231a-8bf0-4414-9a39-cea264df84f2
md"Initially written by Jinguo Liu and Taine Zhao (The author of MLStyle)"
# ╔═╡ e10e0be8-b26e-4719-92dc-8ca46af0b4b5
md"## Feature 1. one function for two"
# ╔═╡ b1a9946b-82b4-4954-8bb9-5df035eaefe4
md"Example: an identity mapping ``(x, y) \mapsto (x,y)`` "
# ╔═╡ 00342a51-36d8-4fdd-aab7-ee02e2122c49
# Reversible identity map (x1, x2) ↦ (x1, x2): an empty @i body leaves the
# arguments untouched, and NiLang returns them automatically.
@i function f1(x1, x2)
# will return inputs automatically for you
end
# ╔═╡ 40c1c48d-0e5a-4a47-b7e4-8f7666281249
f1(2, 3)
# ╔═╡ 59df1f80-9be1-4b26-b263-ca0c7a0b9ab7
(~f1)(2, 3)
# ╔═╡ 8320d326-c1ab-4807-befb-13dda3480bf5
md"## Feature 2. every instruction is reversible, every object is ''mutable''"
# ╔═╡ 93238608-3b86-49f1-ad60-9360e12cff1c
md"General design patterns
* `y += f(x)`
* `y -= f(x)`
* `y ⊻= f(x)`
There are also instructions like `SWAP`, `ROT`."
# ╔═╡ f657c3fb-e140-4c76-8065-54f1cb6d05eb
md"Example: mutating fields of complex numbers"
# ╔═╡ 629a2549-745c-48a2-9bbc-a8f5fb046d11
# Demonstrates the three kinds of reversible statements on complex inputs:
# an accumulative update, a primitive instruction (SWAP on the imaginary
# parts), and a call to another reversible function.
@i function f2(x1::Complex, x2::Complex)
x2 += exp(x1) # accumulative form
SWAP(x1.im, x2.im) # other primitive functions
f1(x1, x2) # other reversible functions
end
# ╔═╡ 640e0029-7931-4afd-bdf9-fed317efbd8e
md" $(@bind expand_f2 CheckBox()) macroexpand"
# ╔═╡ 6930345b-6e93-4b35-8d4f-91ad49141fa1
# When the checkbox bound to `expand_f2` is ticked, show the code that the
# @i macro generates for `f2` (with line-number nodes stripped for clarity).
if expand_f2
macroexpand(NiLang, :(@i function f2(x1::Complex, x2::Complex)
x2 += exp(x1)
SWAP(x1.im, x2.im)
end)) |> NiLangCore.rmlines
end
# ╔═╡ 090522bf-0ff2-4022-8460-aec6e37e936a
f2(1.0+2im, 2.0+4.9im)
# ╔═╡ 88c30609-2f42-405e-a14c-dfab44aef23b
(~f2)(f2(1.0+2im, 2.0+4.9im)...)
# ╔═╡ 2dc665a9-b131-4fef-acde-db346eb0f48b
md"## Feature 3. One can reverse the control flows too"
# ╔═╡ 4e479f48-42cd-476d-8604-08ecbb503a90
md"""
#### Reversible `if` statement
"""
# ╔═╡ dc41e99a-f598-4bf6-9f76-ecdb04f5f40c
leftright(md"
```julia
if (precondition[, postcondition])
...
end
```
", md"
```julia
if (postcondition[, precondition])
~(...)
end
```
")
# ╔═╡ 97e0bae1-69ac-4cbf-b9d9-6b38180edd78
TikzPicture(L"""
\node [test] (pre) {precondition};
\node [proc, it] (st1) [right=of pre] {statements 1};
\node [proc, it] (st2) {statements 2};
\node [test] (post1) [right=of st1] {postcondition};
\node [test] (post2) [right=of st2] {postcondition};
\node [proc,red] (err1) [above=of post1] {invertibility error};
\node [proc,red] (err2) [below=of post2] {invertibility error};
\draw [->,black] (pre.east) -- (st1) node[midway,above] {T};
\draw [->,black] (pre.south) |- (st2) node[midway,below] {F};
\draw [->,black] (-2.5, 0.0) -- (pre.west);
\draw [->,black] (st1) -- (post1);
\draw [->,black] (st2) -- (post2);
\draw [->,red] (post1) -- (err1) node[midway,right] {F};
\draw [->,red] (post2) -- (err2) node[midway,right] {T};
\draw [->,black] (post1.east) -- (12, 0) node[midway,above] {T};
\draw [black] (post2.east) -| (11, 0) node[midway,right] {F};
""", options=raw" font=\sffamily\small,
>={Triangle[]},
*/.tip={Circle[]},
start chain=going below,
node distance=18mm and 40mm,
every join/.style={norm},
base/.style={draw, on chain, on grid, align=center, minimum height=4ex, inner color=black!50!gray!10, outer color=black!50!gray!15},
proc/.style={base, rectangle, text width=8em},
test/.style={base, diamond, text centered, aspect=2.6,inner sep=-0ex},
norm/.style={->, draw, black},
it/.style={font={\sffamily\small\itshape}}", preamble=raw"\usetikzlibrary{shapes.geometric,arrows.meta,chains,positioning,quotes}")
# ╔═╡ 355ba831-6be0-456a-8f94-36acd2365f17
md"Example: obtaining the absolute value ``x \mapsto |x|``"
# ╔═╡ 003c3e68-600e-4688-832b-5e061572b128
# Deliberately broken absolute value (pedagogical example — do not "fix"):
# the single-condition `if` re-evaluates `x < 0` as the branch condition when
# run in reverse, but `NEG(x)` has changed the sign of `x`, so the inverse
# takes the wrong branch and the program is not reversible.
@i function abs_incorrect(x)
if x < 0
NEG(x)
end
end
# ╔═╡ 1fb196f9-0f0a-42dc-b094-077cdf18d13d
abs_incorrect(-3)
# ╔═╡ aa9a679e-63bc-4951-b6f4-65316e212bc8
# Correct reversible absolute value: the two-condition `if (pre, post)` form
# uses `x < 0` as the pre-condition and the ancilla `sgn` as the
# post-condition. Recording the sign flip in `sgn` makes the branch decidable
# when running backwards. Assumes `sgn` enters as `false`.
@i function abs_correct(x, sgn)
if (x < 0, sgn)
NEG(x)
sgn ⊻= true
end
end
# ╔═╡ e6fd3c2d-cadd-40d7-a575-b1e68c45ee13
abs_correct(-3, false)
# ╔═╡ 02b7e1b4-4622-4e68-966e-ff79817557d1
md"#### Reversible `while` statement"
# ╔═╡ 364fd613-0ebd-4b45-a3fd-f9baa8c487e3
leftright(md"
```julia
@from condition1 while condition2
...
end
```
", md"
```julia
@from !(condition2) while !(condition1)
~(...)
end
```
")
# ╔═╡ 75d8283a-b331-4648-84a8-489e168e33f9
TikzPicture(L"""
\node [test] (c1) {condition 1};
\node [test] (c2) [right=of c1] {condition 2};
\node [test] (c3) [right=of c2] {condition 1};
\node [proc, it] (st1) [above=of c2] {statements};
\node [proc,red] (err1) [below=of c1] {invertibility error};
\node [proc,red] (err2) [right=of c3] {invertibility error};
\draw [->,black] (c2) -- (st1) node[midway,right] {T};
\draw [->,black] (st1) -| (c3);
\draw [->,black] (-2.5, 0.0) -- (c1.west);
\draw [->,black] (c1) -- (c2) node[midway,above] {T};
\draw [->,black] (c3) -- (c2) node[midway,above] {F};
\draw [->,red] (c1) -- (err1) node[midway,right] {F};
\draw [->,red] (c3) -- (err2) node[midway,above] {T};
\draw [->,black] (c2.south) |- (11, -2) node[midway,below] {F};
""", options=raw" font=\sffamily\small,
>={Triangle[]},
*/.tip={Circle[]},
start chain=going below,
node distance=18mm and 40mm,
every join/.style={norm},
base/.style={draw, on chain, on grid, align=center, minimum height=4ex, inner color=black!50!gray!10, outer color=black!50!gray!15},
proc/.style={base, rectangle, text width=8em},
test/.style={base, diamond, text centered, aspect=2.6,inner sep=-0ex},
norm/.style={->, draw, black},
it/.style={font={\sffamily\small\itshape}}", preamble=raw"\usetikzlibrary{shapes.geometric,arrows.meta,chains,positioning,quotes}")
# ╔═╡ 2207c2fb-4a52-4766-8dd3-03872744aa74
md"example: computing Fibonacci numbers"
# ╔═╡ 288331c3-2dfb-4941-985f-554be409c0ab
# Reversible Fibonacci: accumulates the n-th Fibonacci number into `y`, with
# the convention fib(0) = fib(1) = 1 (so fib(10) = 89, as the demo cells
# show). The `@from cond1 while cond2` loop is reversible because the entry
# condition `counter == n` and exit condition `counter > 1` determine the
# iteration bounds in both directions; `@invcheckoff` disables the runtime
# reversibility checks for speed.
@i function fib(y, n)
@invcheckoff if (n >= 1, ~)
counter ← 0
counter += n
@from counter==n while counter > 1
counter -= 1
fib(y, counter)
counter -= 1
end
# the loop decrements `counter` by 2 per iteration, leaving 1 for odd n
# and 0 for even n; subtract the parity so the ancilla ends at 0
counter -= n % 2
counter → 0
end
y += 1
end
# ╔═╡ 12a30359-d6ff-4113-bab5-b198e908cf1a
fib(0, 10)
# ╔═╡ 23ea88b4-6b89-462a-92da-0e8bdf5c73b5
(~fib)(89, 10)
# ╔═╡ 4bfdabd6-b7ff-40fb-b567-52910acb5a07
md"""
#### Reversible `for` statement
$(
leftright(md"
```julia
for iter = start:step:stop
...
end
```
", md"
```julia
for iter = stop:-step:start
~(...)
end
```
")
)
"""
# ╔═╡ 2603147b-e7a2-4cae-b88f-2cfebe16bacb
md"## Feature 4. storage access should also be reversible"
# ╔═╡ 12d49e2e-cc6e-48d6-b11a-e7c311453bfc
md"""
$(
leftright(updown(md"
```julia
var ← zero(T)
```", md"borrow some memory from system and allocate it to variable var of type T."), updown(md"
```julia
var → zero(T)
```
", md"return the zero cleared variable to system."))
)
"""
# ╔═╡ 10312dc7-e861-4c89-b2fd-672cfe8850bf
md"""
$(
leftright(updown(md"
```julia
dict[key] ← variable
```", md"create a new entry, asserting `key` does not exist"), updown(md"
```julia
dict[key] → variable
```
", md"asserting the value of an existing key, and delete it."))
)
"""
# ╔═╡ 03f58f2a-24b6-4235-9102-71a19b9679ac
md"Example: implementing `y += log(x)` for complex number.
```math
\log(z) = \log(|z|) + i {\rm Arg}(z)
```"
# ╔═╡ 22a5853e-4f9a-4da0-bc03-84a6b0061cfe
# Reversible complex log, version 1: log(z) = log(|z|) + i*Arg(z).
# The intermediates |x|^2 (`squaren`) and |x| (`n`) are exposed as extra
# arguments instead of being allocated internally, so the caller must pass
# zero-initialized slots for them.
@i function clog_v1(y::Complex{T}, squaren::T, n::T, x::Complex{T}) where T
squaren += x.re^2
squaren += x.im^2
n += sqrt(squaren)
# real part: log of the modulus; imaginary part: the argument of x.
y.re += log(n)
y.im += atan(x.im, x.re)
end
# ╔═╡ 1ecdc4d2-b3ca-4f5c-a454-0f0bc51b6ec2
@test clog_v1(0.0im, 0.0, 0.0, 3.0im)[1] ≈ log(3.0im)
# ╔═╡ 927ea209-2ccd-48d5-b69e-0a3c735bb496
md"""Bennett, Charles H. "Logical reversibility of computation." (1973)."""
# ╔═╡ 962b204c-8195-4938-944c-b7c4a52e70bd
TikzPicture(L"""
\def\r{0.15};
\foreach \x in {1,...,5}{
\fill[fill=black] (\x, 0) circle [radius=\r];
\node[white] at (\x, 0) {$s_{\x}$};
}
\fill[fill=white] (5.5, 0) circle [radius=\r];
\foreach \x in {1,...,4}{
\draw [black, thick, ->] (\x+\r, \r) .. controls (\x+0.5, 0.3) .. (\x+1-\r, \r);
\node at (\x+0.5, 0.4) {\x};
}
\foreach[evaluate={\y=int(8-\x)}] \x in {1,...,3}{
\draw [red, thick, <-] (\x+\r, -\r) .. controls (\x+0.5, -0.3) .. (\x+1-\r, -\r);
\node at (\x+0.5, -0.4) {\y};
}
"""
, options="scale=2.0", preamble="")
# ╔═╡ af58a0f8-e3fd-465f-b1ae-6fbd94123c91
# Reversible complex log, version 2: same math as clog_v1, but the
# intermediates are allocated (`←`) and deallocated (`→`) internally using
# Bennett's compute-copy-uncompute pattern, so the interface only needs
# `y` and `x`.
@i function clog_v2(y::Complex{T}, x::Complex{T}) where T
######### compute ########
n ← zero(T)
squaren ← zero(T)
squaren += x.re^2
squaren += x.im^2
n += sqrt(squaren)
########## copy ##########
y.re += log(n)
y.im += atan(x.im, x.re)
####### uncompute ########
# Undo the compute stage in reverse order so both ancillas return to zero
# and can be deallocated with `→ zero(T)`.
n -= sqrt(squaren)
squaren -= x.im^2
squaren -= x.re^2
n → zero(T)
squaren → zero(T)
end
# ╔═╡ 4f993061-c6c3-4acb-aef3-8453e7b83997
@test clog_v2(0.0im, 3.0im)[1] ≈ log(3.0im)
# ╔═╡ 6e5cf9bb-7cab-4da1-8831-541e0ee3bde8
# Reversible complex log, version 3: identical math to clog_v2, but the
# compute stage is wrapped in `@routine` so the uncompute stage is generated
# automatically by `~@routine`.
@i @inline function clog_v3(y::Complex{T}, x::Complex{T}) where T
# @invcheckoff turns off the reversibility check to accelerate the code
@routine @invcheckoff begin
@zeros T squaren n
squaren += x.re^2
squaren += x.im^2
n += sqrt(squaren)
end
y.re += log(n)
y.im += atan(x.im, x.re)
# Runs the @routine block in reverse, clearing `squaren` and `n`.
~@routine
end
# ╔═╡ 320e4114-f0da-4106-90ab-a9f7b0ef0099
@test clog_v3(0.0im, 3.0im)[1] ≈ log(3.0im)
# ╔═╡ 2d944e8d-e19d-48be-ab7f-c3e54e9f43ef
md"# III. Automatic differentiation in NiLang"
# ╔═╡ 4f53fa8e-ea9b-461f-8199-7bbe2a3ef544
md"## Scalar or tensor"
# ╔═╡ 56ea4f5f-ea88-46d6-beed-b9a7afed315d
md"Differentiating matrix vector multiplication"
# ╔═╡ 124a9ecd-0bda-4823-9507-92efcf449d9c
md"""
```math
y = A x
```
"""
# ╔═╡ 6819498c-7a46-48fa-9eec-38341dca72f9
let
    # Show the tensor-level and scalar-level views of y = A*x side by side.
    tensor_heading = md"tensor level view"
    scalar_heading = md"scalar level view"
    tensor_code = md"
```julia
y = A * x
```
"
    scalar_code = md"
```julia
for j=1:n
for i=1:m
y[i] += A[i,j] * x[j]
end
end
```"
    leftright(updown(tensor_heading, tensor_code, width=300), updown(scalar_heading, scalar_code, width=300))
end
# ╔═╡ 400f79cf-9260-4195-9582-0e8c486ddb5a
html"""implementing AD on scalars
"""
# ╔═╡ 45448141-214d-4e08-978e-5d1d25f763cd
md"## Case 2: differentiating linear algebra functions"
# ╔═╡ dfe3cc09-6e27-4166-9a4e-2dd22f1e08a2
md"The definition of QR factorization
```math
A = QR
```
"
# ╔═╡ 3d07e6d1-2964-4718-b7bc-114d82389aa4
md"We implement Householder QR"
# ╔═╡ c00cd648-d685-4a17-9ddf-39c06dd5f066
md"""
$Q = H_1H_2 \ldots H_n$
"""
# ╔═╡ 5390c3ba-1507-4db9-9972-8295cbe493bc
md"""
```math
\begin{align}
&H = 1-\beta vv^T
\end{align}
```
"""
# ╔═╡ 33a0bda1-c943-4792-bc3d-fbf1adf16d0a
let
# Illustration of the Householder reflection vector v = x - ||x||_2 * e.
img = TikzPicture(L"""
\draw[->,thick] (0, 0) -- (1, 1);
\draw[->,thick] (0, 0) -- ({sqrt(2)}, 0);
\draw[thick,dashed] (0, 0) -- (1.5, {1.5*tan(22.5)});
\node at (1.5, 0) {$e$};
\node at (1.1, 1.1) {$x$};
\node at (1.4, {1.6*tan(22.5)}) {$v$};
\node at (2.6, 0.5) {$v = x-\|x\|_2 e$};
""", options="scale=2.0", preamble="")
# NOTE(review): this HTML("""...""") call and the enclosing `let` appear
# truncated — the triple-quoted string is never used and there is no closing
# `end`/`)` visible; confirm against the original notebook.
HTML("""
"""
# ╔═╡ c24e7391-a187-4a7e-aab7-93027f7db965
md"The space optimal solution for 16 grids requires recursive Bennett's algorithm"
# ╔═╡ 12734842-d66f-4bfc-a9ad-01adeb2450e0
# Bennett's time/space trade-off algorithm, applied recursively.
# stepfunc: reversible step function advancing state[i] -> state[i+1]
# state: state dictionary; the initial value must contain entry `state[base]`
# k: compute `k` chunks forward and uncompute `k-1` chunks backward per level
# base: starting point (key into `state`)
# len: number of steps to compute (must be divisible by k at each level)
@i function bennett_alg!(stepfunc, state::Dict{Int,T}, k::Int, base, len, args...; kwargs...) where T
if len == 1 # lowest level
# Allocate the next state slot and perform one forward step.
state[base+1] ← _zero(state[base])
stepfunc(state[base+1], state[base], args...; kwargs...)
else
@safe @assert len % k == 0
@routine begin # compute block size
chunksize ← 0
start ← 0
chunksize += len ÷ k
start += base
# Recursively compute the first k-1 chunks; these intermediate
# checkpoints are uncomputed again by ~@routine below.
for j=1:k-1
bennett_alg!(stepfunc, state, k, start, chunksize, args...; kwargs...)
start += chunksize
end
end
# Compute the final chunk; its result survives the uncompute stage.
bennett_alg!(stepfunc, state, k, start, chunksize, args...; kwargs...)
# Reverse the routine: frees the intermediate checkpoints and the ancillas.
~@routine
end
end
# ╔═╡ 357fd442-6e6e-4b14-b2d0-efd3fa775d0b
bennett_alg!(i_find_maximum_step, Dict(0=>FindMaxState(x[1], 1)), 2, 0, length(x)-1, x)[2]
# ╔═╡ 1bca53ef-3438-4db5-9900-9fee71936a62
# Reversible loss: run the Bennett-checkpointed maximum search over `x`
# and accumulate the final state's `.m` field (presumably the running
# maximum — TODO confirm against FindMaxState's definition) into `result`.
@i function loss(result, state, x)
nstep ← length(x)-1
# `@skip!` marks the step function as a constant (not differentiated).
bennett_alg!((@skip! i_find_maximum_step), state, 2, 0, nstep, x)
result += state[nstep].m
# Deallocate `nstep`, asserting it still equals length(x)-1.
nstep → length(x)-1
end
# ╔═╡ 4e273d1f-a00e-49ca-9f8d-a0f6930550fb
let
@testset "qr pivoted gradient" begin
Random.seed!(3)
# Check the reversible pivoted QR against LinearAlgebra's reference
# implementation on a random complex matrix.
A = randn(ComplexF64, 5, 5)
res = alloc_qr(A)
res, = qr_pivoted!(res, copy(A))
res2 = LinearAlgebra.qrfactPivotedUnblocked!(copy(A))
@test res.factors ≈ res2.factors
@test res.τ ≈ res2.τ
@test res.jpvt ≈ res2.jpvt
# rank deficient initial matrix
n = 50
U = LinearAlgebra.qr(randn(n, n)).Q
# Build a rank n÷2 matrix by zeroing half the singular values.
Σ = Diagonal((x=randn(n); x[n÷2+1:end] .= 0; x))
A = U*Σ*U'
res = alloc_qr(A)
@test rank(A) == n ÷ 2
qrres = qr_pivoted!(deepcopy(res), copy(A))[1]
# The number of nonzero rows of R should equal the matrix rank.
@test count(x->(x>1e-12), sum(abs2, QRPivoted(qrres.factors, qrres.τ, qrres.jpvt).R, dims=2)) == n ÷ 2
#A = randn(ComplexF64, n, n)
# Scalar loss through the reversible QR, used for gradient checking.
@i function loss(y, qrres, A)
qr_pivoted!(qrres, A)
y += abs(qrres.factors[1])
end
nrloss(A) = loss(0.0, deepcopy(res), A)[1]
# Finite-difference gradient (central difference with step δ) ...
ngA = zero(A)
δ = 1e-5
for j=1:size(A, 2)
for i=1:size(A, 1)
A_ = copy(A)
A_[i,j] -= δ/2
l1 = nrloss(copy(A_))
A_[i,j] += δ
l2 = nrloss(A_)
ngA[i,j] = (l2-l1)/δ
end
end
# ... compared against NiLang's reverse-mode gradient w.r.t. A.
gA = NiLang.AD.gradient(loss, (0.0, res, A); iloss=1)[3]
@test real.(gA) ≈ ngA
end
end
# ╔═╡ 50f7070d-9f6f-4025-9be3-13812c3000eb
let
    # Differentiate the Bennett-based maximum-finding loss w.r.t. its inputs.
    data = [1.0, 3.0, 2.0, 1.3, -1.0]
    initial_state = Dict(0 => FindMaxState(data[1], 1))
    NiLang.gradient(loss, (0.0, initial_state, data); iloss=1)
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Compose = "a81c6b42-2e10-5240-aca2-a61377ecd94b"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NiLang = "ab4ef3a6-0b42-11ea-31f6-e34652774712"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TikzPictures = "37f6aa50-8035-52d0-81c2-5a1d08754b2d"
TreeverseAlgorithm = "e1c63c57-2fea-45bf-a8bf-df3ea6afb545"
Viznet = "52a3aca4-6234-47fd-b74a-806bdf78ede9"
[compat]
BenchmarkTools = "~1.0.0"
Compose = "~0.9.2"
ForwardDiff = "~0.10.18"
NiLang = "~0.9.1"
PlutoUI = "~0.7.9"
Revise = "~3.1.17"
TikzPictures = "~3.3.3"
TreeverseAlgorithm = "~0.1.0"
Viznet = "~0.3.3"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Statistics", "UUIDs"]
git-tree-sha1 = "01ca3823217f474243cc2c8e6e1d1f45956fe872"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.0.0"
[[Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.8+0"
[[Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "f2202b55d816427cd385a9a4f3ffb226bee80f99"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.1+0"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "be770c08881f7bb928dfd86d1ba83798f76cf62a"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "0.10.9"
[[CodeTracking]]
deps = ["InteractiveUtils", "UUIDs"]
git-tree-sha1 = "8ad457cfeb0bca98732c97958ef81000a543e73e"
uuid = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
version = "1.0.5"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dc7dedc2c2aa9faf59a55c622760a25cbefbe941"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.31.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[Compose]]
deps = ["Base64", "Colors", "DataStructures", "Dates", "IterTools", "JSON", "LinearAlgebra", "Measures", "Printf", "Random", "Requires", "Statistics", "UUIDs"]
git-tree-sha1 = "c6461fc7c35a4bb8d00905df7adafcff1fe3a6bc"
uuid = "a81c6b42-2e10-5240-aca2-a61377ecd94b"
version = "0.9.2"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.9"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[Dierckx]]
deps = ["Dierckx_jll"]
git-tree-sha1 = "5fefbe52e9a6e55b8f87cb89352d469bd3a3a090"
uuid = "39dd38d3-220a-591b-8e3c-4c3a8c710a94"
version = "0.5.1"
[[Dierckx_jll]]
deps = ["CompilerSupportLibraries_jll", "Libdl", "Pkg"]
git-tree-sha1 = "a580560f526f6fc6973e8bad2b036514a4e3b013"
uuid = "cd4c43a9-7502-52ba-aa6d-59fb2a88580b"
version = "0.0.1+0"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "214c3fcac57755cfda163d91c58893a8723f93e9"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.0.2"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "a32185f5428d3986f47c2ab78b1f216d5e6cc96f"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.5"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.2.10+0"
[[FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Fontconfig_jll]]
deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03"
uuid = "a3f928ae-7b40-5064-980b-68af3947d34b"
version = "2.13.93+0"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "NaNMath", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "e2af66012e08966366a43251e1fd421522908be6"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.18"
[[FreeType2_jll]]
deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "87eb71354d8ec1a96d4a7636bd57a7347dde3ef9"
uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7"
version = "2.10.4+0"
[[Gettext_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046"
uuid = "78b55507-aeef-58d4-861c-77aaff3498b1"
version = "0.21.0+0"
[[Glib_jll]]
deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "47ce50b742921377301e15005c96e979574e130b"
uuid = "7746bdde-850d-59dc-9ae8-88ece973131d"
version = "2.68.1+0"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[IterTools]]
git-tree-sha1 = "05110a2ab1fc5f932622ffea2a003221f4782c18"
uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
version = "1.3.0"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.1"
[[JpegTurbo_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d735490ac75c5cb9f1b00d8b5509c11984dc6943"
uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8"
version = "2.1.0+0"
[[JuliaInterpreter]]
deps = ["CodeTracking", "InteractiveUtils", "Random", "UUIDs"]
git-tree-sha1 = "31c2eee64c1eee6e8e3f30d5a03d4b5b7086ab29"
uuid = "aa1ae85d-cabe-5617-a682-6adf51b2e16a"
version = "0.8.18"
[[LZO_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6"
uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac"
version = "2.10.1+0"
[[LaTeXStrings]]
git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.2.1"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[Libffi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "761a393aeccd6aa92ec3515e428c26bf99575b3b"
uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490"
version = "3.2.2+0"
[[Libgcrypt_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"]
git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae"
uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4"
version = "1.8.7+0"
[[Libgpg_error_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9"
uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8"
version = "1.42.0+0"
[[Libiconv_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.16.1+1"
[[Libmount_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73"
uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9"
version = "2.35.0+0"
[[Libtiff_jll]]
deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"]
git-tree-sha1 = "340e257aada13f95f98ee352d316c3bed37c8ab9"
uuid = "89763e89-9b03-5906-acba-b20f662cd828"
version = "4.3.0+0"
[[Libuuid_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066"
uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700"
version = "2.36.0+0"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LittleCMS_jll]]
deps = ["JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pkg"]
git-tree-sha1 = "e6ea89d915cdad8d264f7f9158c6664f879edcde"
uuid = "d3a379c0-f9a3-5b72-a4c0-6bf4d2e8af0f"
version = "2.9.0+0"
[[LogExpFunctions]]
deps = ["DocStringExtensions", "LinearAlgebra"]
git-tree-sha1 = "1ba664552f1ef15325e68dc4c05c3ef8c2d5d885"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.2.4"
[[LogarithmicNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "d88b70111754e3660f80d3596a343ce42bf5ee84"
uuid = "aa2f6b4e-9042-5d33-9679-40d3a6b85899"
version = "0.4.2"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[LoweredCodeUtils]]
deps = ["JuliaInterpreter"]
git-tree-sha1 = "4bfb8b57df913f3b28a6bd3bdbebe9a50538e689"
uuid = "6f1432cf-f94c-5a45-995e-cdbf5db27b0b"
version = "2.1.0"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.6"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MatchCore]]
git-tree-sha1 = "90af9fe333f8c9851f952dfa7f335185c94567c0"
uuid = "5dd3f0b1-72a9-48ad-ae6e-79f673da005f"
version = "0.1.1"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Measures]]
git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f"
uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e"
version = "0.3.1"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[NiLang]]
deps = ["FixedPointNumbers", "LinearAlgebra", "LogarithmicNumbers", "MatchCore", "NiLangCore", "Reexport", "SparseArrays", "TupleTools"]
git-tree-sha1 = "3fe439482d8c08a15f929ae7278a6c7f737672d5"
uuid = "ab4ef3a6-0b42-11ea-31f6-e34652774712"
version = "0.9.1"
[[NiLangCore]]
deps = ["MatchCore", "TupleTools"]
git-tree-sha1 = "239f97ea947531cfe7a596746e31c8429c7169b9"
uuid = "575d3204-02a4-11ea-3f62-238caa8bf11e"
version = "0.10.3"
[[OpenJpeg_jll]]
deps = ["Libdl", "Libtiff_jll", "LittleCMS_jll", "Pkg", "libpng_jll"]
git-tree-sha1 = "e330ffff1c6a593fa44cc40c29900bee82026406"
uuid = "643b3616-a352-519d-856d-80112ee9badc"
version = "2.3.1+0"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[PCRE_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488"
uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc"
version = "8.44.0+0"
[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "1.1.0"
[[Pixman_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29"
uuid = "30392449-352a-5448-841d-b1acce4e97dc"
version = "0.40.1+0"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlutoUI]]
deps = ["Base64", "Dates", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "Suppressor"]
git-tree-sha1 = "44e225d5837e2a2345e69a1d1e01ac2443ff9fcb"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.9"
[[Poppler_jll]]
deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "OpenJpeg_jll", "Pkg", "libpng_jll"]
git-tree-sha1 = "e11443687ac151ac6ef6699eb75f964bed8e1faa"
uuid = "9c32591e-4766-534b-9725-b71a8799265b"
version = "0.87.0+2"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Reexport]]
git-tree-sha1 = "5f6c21241f0f655da3952fd60aa18477cf96c220"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.1.0"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[Revise]]
deps = ["CodeTracking", "Distributed", "FileWatching", "JuliaInterpreter", "LibGit2", "LoweredCodeUtils", "OrderedCollections", "Pkg", "REPL", "Requires", "UUIDs", "Unicode"]
git-tree-sha1 = "410bbe13d9a7816e862ed72ac119bda7fb988c08"
uuid = "295af30f-e4ad-537b-8983-00126c2a3abe"
version = "3.1.17"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "LogExpFunctions", "OpenSpecFun_jll"]
git-tree-sha1 = "a50550fa3164a8c46747e62063b4d774ac1bcf49"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.5.1"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "745914ebcd610da69f3cb6bf76cb7bb83dcb8c9a"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.4"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[Suppressor]]
git-tree-sha1 = "a819d77f31f83e5792a76081eee1ea6342ab8787"
uuid = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
version = "0.2.0"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TikzPictures]]
deps = ["LaTeXStrings", "Poppler_jll", "Requires"]
git-tree-sha1 = "06b36e2baa9b97814ef1993207b71e2e23e9efb5"
uuid = "37f6aa50-8035-52d0-81c2-5a1d08754b2d"
version = "3.3.3"
[[TreeverseAlgorithm]]
deps = ["Requires"]
git-tree-sha1 = "4292bc608573c2047fd12b0a611787e77f5595ba"
uuid = "e1c63c57-2fea-45bf-a8bf-df3ea6afb545"
version = "0.1.0"
[[TupleTools]]
git-tree-sha1 = "62a7a6cd5a608ff71cecfdb612e67a0897836069"
uuid = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
version = "1.2.0"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[Viznet]]
deps = ["Compose", "Dierckx"]
git-tree-sha1 = "7a022ae6ac8b153d47617ed8c196ce60645689f1"
uuid = "52a3aca4-6234-47fd-b74a-806bdf78ede9"
version = "0.3.3"
[[XML2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a"
uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a"
version = "2.9.12+0"
[[XSLT_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"]
git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a"
uuid = "aed1982a-8fda-507f-9586-7b0439959a61"
version = "1.1.34+0"
[[Xorg_libX11_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"]
git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527"
uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc"
version = "1.6.9+4"
[[Xorg_libXau_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e"
uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec"
version = "1.0.9+4"
[[Xorg_libXdmcp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4"
uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05"
version = "1.1.3+4"
[[Xorg_libXext_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3"
uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3"
version = "1.3.4+4"
[[Xorg_libXrender_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96"
uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa"
version = "0.9.10+4"
[[Xorg_libpthread_stubs_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb"
uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74"
version = "0.1.0+3"
[[Xorg_libxcb_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"]
git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6"
uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b"
version = "1.13.0+3"
[[Xorg_xtrans_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845"
uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10"
version = "1.4.0+3"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zstd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6"
uuid = "3161d3a3-bdf6-5164-811a-617609db77b4"
version = "1.5.0+0"
[[libpng_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c"
uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f"
version = "1.6.38+0"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╟─e20e2d2e-4b28-4e32-8d80-ce029928a094
# ╟─f3e235e7-76b9-4c39-bc70-038539838ff4
# ╟─8308df59-3faa-4abf-8f05-119bbae48f64
# ╟─a3532a83-9fd3-4d24-b1bb-b52457317e51
# ╟─15657e4b-848e-43ad-a99f-37143d11705e
# ╟─34a6b7f4-7d72-485d-86dc-f4b1ba6174eb
# ╟─67d1b500-964e-4668-a7d0-ed93886446ca
# ╟─673992ed-6963-400a-a69b-d65d26c4f443
# ╟─6078758e-b392-4bdb-a1e7-44b135ce900e
# ╠═41e06e2a-b482-4e0f-8569-fee2ffd8aaaf
# ╟─dcf53d46-e259-4101-8530-9621094ee586
# ╟─6a88d26c-c895-4852-ab4f-37297b848731
# ╟─f9675365-36aa-430c-b747-3bc4f602e6fb
# ╟─46eb4ba9-dce6-4711-9c4d-3f16de6240de
# ╟─046f7559-4af9-4982-b5c3-335add0911d7
# ╟─0a039bfa-571e-4fad-b73c-1324d08777fc
# ╟─3f1e4d7a-32a7-4c7e-92dd-465bac925e63
# ╟─f68bcfb6-97ce-48d1-b0b8-e8466d4ac879
# ╟─3d4ba750-8d62-48ac-bf96-691397689ddc
# ╟─f4cb9212-181f-4338-b858-1d99c7f415e9
# ╟─31bde262-6352-4be0-b5cc-1781e3df2268
# ╟─83ff3fc3-bcd8-4235-a42f-1d75c7d6aa5b
# ╟─b308e270-6b40-4946-ac92-c705823f2c1e
# ╟─e483b3d4-d01c-4a98-8e68-e8120a7d95a7
# ╟─20c34526-c7c4-11eb-21fa-d706fd684a4c
# ╟─3f96abdf-fb5f-4d79-a288-e20b8c1f55d1
# ╟─5d51231a-8bf0-4414-9a39-cea264df84f2
# ╠═3b0fd2b5-5c6d-4d56-9e48-cda1493b4c72
# ╟─e10e0be8-b26e-4719-92dc-8ca46af0b4b5
# ╟─b1a9946b-82b4-4954-8bb9-5df035eaefe4
# ╠═00342a51-36d8-4fdd-aab7-ee02e2122c49
# ╠═40c1c48d-0e5a-4a47-b7e4-8f7666281249
# ╠═59df1f80-9be1-4b26-b263-ca0c7a0b9ab7
# ╟─8320d326-c1ab-4807-befb-13dda3480bf5
# ╟─93238608-3b86-49f1-ad60-9360e12cff1c
# ╟─f657c3fb-e140-4c76-8065-54f1cb6d05eb
# ╠═629a2549-745c-48a2-9bbc-a8f5fb046d11
# ╟─640e0029-7931-4afd-bdf9-fed317efbd8e
# ╟─6930345b-6e93-4b35-8d4f-91ad49141fa1
# ╠═090522bf-0ff2-4022-8460-aec6e37e936a
# ╠═88c30609-2f42-405e-a14c-dfab44aef23b
# ╟─2dc665a9-b131-4fef-acde-db346eb0f48b
# ╟─4e479f48-42cd-476d-8604-08ecbb503a90
# ╟─dc41e99a-f598-4bf6-9f76-ecdb04f5f40c
# ╟─97e0bae1-69ac-4cbf-b9d9-6b38180edd78
# ╟─355ba831-6be0-456a-8f94-36acd2365f17
# ╠═003c3e68-600e-4688-832b-5e061572b128
# ╠═1fb196f9-0f0a-42dc-b094-077cdf18d13d
# ╠═aa9a679e-63bc-4951-b6f4-65316e212bc8
# ╠═e6fd3c2d-cadd-40d7-a575-b1e68c45ee13
# ╟─02b7e1b4-4622-4e68-966e-ff79817557d1
# ╟─364fd613-0ebd-4b45-a3fd-f9baa8c487e3
# ╟─75d8283a-b331-4648-84a8-489e168e33f9
# ╟─2207c2fb-4a52-4766-8dd3-03872744aa74
# ╠═288331c3-2dfb-4941-985f-554be409c0ab
# ╠═12a30359-d6ff-4113-bab5-b198e908cf1a
# ╠═23ea88b4-6b89-462a-92da-0e8bdf5c73b5
# ╟─4bfdabd6-b7ff-40fb-b567-52910acb5a07
# ╟─2603147b-e7a2-4cae-b88f-2cfebe16bacb
# ╟─12d49e2e-cc6e-48d6-b11a-e7c311453bfc
# ╟─10312dc7-e861-4c89-b2fd-672cfe8850bf
# ╟─03f58f2a-24b6-4235-9102-71a19b9679ac
# ╠═22a5853e-4f9a-4da0-bc03-84a6b0061cfe
# ╠═1ecdc4d2-b3ca-4f5c-a454-0f0bc51b6ec2
# ╟─927ea209-2ccd-48d5-b69e-0a3c735bb496
# ╟─962b204c-8195-4938-944c-b7c4a52e70bd
# ╠═af58a0f8-e3fd-465f-b1ae-6fbd94123c91
# ╠═4f993061-c6c3-4acb-aef3-8453e7b83997
# ╠═6e5cf9bb-7cab-4da1-8831-541e0ee3bde8
# ╠═320e4114-f0da-4106-90ab-a9f7b0ef0099
# ╟─2d944e8d-e19d-48be-ab7f-c3e54e9f43ef
# ╟─4f53fa8e-ea9b-461f-8199-7bbe2a3ef544
# ╟─56ea4f5f-ea88-46d6-beed-b9a7afed315d
# ╟─124a9ecd-0bda-4823-9507-92efcf449d9c
# ╟─6819498c-7a46-48fa-9eec-38341dca72f9
# ╟─400f79cf-9260-4195-9582-0e8c486ddb5a
# ╟─2dad3acd-332c-46b9-8f84-21c076bdef41
# ╟─a674bde5-70e1-4d21-aedf-8977f8039c36
# ╟─bf696784-23d4-42b2-8ee1-bfec11ff8d78
# ╠═a7810352-7967-460d-abd7-361a324c20a9
# ╠═9193fcbb-ec4f-41b4-8fca-be8e183dea31
# ╠═a56445b5-e530-4035-9ac6-a2d196a6276a
# ╠═3fadf1d4-8fa2-4c02-aa21-b7969b465536
# ╠═12e933ca-13f3-414c-926a-f1bb1bbe66cf
# ╟─4403c183-5eeb-4fd0-87a4-4ad29e1f4dc2
# ╠═4c3f9b91-4f27-4fb8-a9db-1ddbfd62dbdd
# ╠═af71e2d7-a600-46fd-9a46-1b9d4607f06d
# ╟─1d3c7324-6828-47aa-b30e-bcac0e052213
# ╠═2993fe1f-042b-4c85-85b8-bcc7ed449a54
# ╟─048d482e-3e5b-496f-95d7-17589a5f6f11
# ╟─e22bbe66-56f5-40be-b464-1f8651e6a6ac
# ╟─28767d73-47a8-4f4b-b3dd-146c4ae3e038
# ╟─ea907d51-d4a9-48ca-90a5-bd91309ccfad
# ╟─3aa99be5-6747-4163-9bb6-ed8cd5ce19f6
# ╟─2f0263f5-2ace-4d4b-8d71-cee26c03122e
# ╟─03a21468-c2fe-4df7-ae8a-38b28a0efe2f
# ╟─f326d8e5-7117-4eed-b30d-0f64e5974426
# ╟─b9af3db6-5725-4190-b96c-f3fa41f07c93
# ╟─47cf7e85-c49f-4618-9689-e0de789625f6
# ╟─2fcf48fd-bedc-41d9-a433-68efd5dd0d20
# ╟─5060f5bb-8430-42aa-b61d-c88249edb323
# ╟─55dd53ed-6420-4426-95ae-feb47bf50f22
# ╟─c62a9f94-457f-496b-bee0-bb0db02aca5d
# ╟─bccadcb5-6d9f-4a70-b7c4-74e0e3d5f8c8
# ╟─b308ecb0-070e-4ad8-8009-dc60e75bbe01
# ╟─9123e669-19c7-47c1-a924-0c618b4a9c1f
# ╟─83d0c5fc-9cb7-4e37-bfdb-ed630b94d9b4
# ╠═c6bd40af-50ed-4cee-8043-60b2bac05058
# ╟─3d2feca5-43d3-4a46-ba1c-849c5ceeb676
# ╟─7ca616d1-caed-40cf-b768-b07af81a654d
# ╠═ddc662d7-6901-4885-9d2c-1876a2c9d2ff
# ╟─a54d5fc4-643c-47d2-be97-4626a060c9b4
# ╟─8b556fbe-275f-4e7b-94a0-7434ce81ad8b
# ╟─8e9c55f6-8175-4cb4-8798-91107c4d16ee
# ╠═873ef2c2-653e-425e-9732-b1ed19f7a0b7
# ╟─972d889c-c48c-470a-b710-aba9ecaacdaa
# ╟─54b3a283-7d42-431f-9ecb-48f37198409a
# ╠═590f56f7-5654-4493-9103-02a0fce6e945
# ╟─11964529-8e88-4743-a725-57fc5c525649
# ╟─45448141-214d-4e08-978e-5d1d25f763cd
# ╟─dfe3cc09-6e27-4166-9a4e-2dd22f1e08a2
# ╟─3d07e6d1-2964-4718-b7bc-114d82389aa4
# ╟─c00cd648-d685-4a17-9ddf-39c06dd5f066
# ╟─5390c3ba-1507-4db9-9972-8295cbe493bc
# ╟─33a0bda1-c943-4792-bc3d-fbf1adf16d0a
# ╟─a2e81e79-3f85-4eef-8639-f455f9165a25
# ╟─0a04c470-8f5a-4b56-b2e2-5b32e3b944f3
# ╟─dced36eb-3b84-4d08-8268-0ffb831e39b5
# ╠═141e21c0-1bdf-4e6b-b76d-129567a1180f
# ╠═0dc268fa-2b71-4915-b73b-3f1de9fbc157
# ╠═871f62a3-2c9c-445e-b1dc-4cba363fa604
# ╠═6a50f7ac-b5a5-444d-9042-81b82cb66aec
# ╠═bace7924-3aff-463f-9351-cc59191b469a
# ╠═923a5e2a-cc91-4404-913a-6e8012fa5834
# ╟─b12bcfbb-c7f2-4dc9-821a-e59365bf6fa4
# ╠═12d52dc8-da0b-46dc-9251-eb0cc9a39a7e
# ╠═ac35b26c-0585-4d2a-8fbf-bda9b141d6af
# ╠═f8616bf4-02e6-4d66-a0f6-d35db701e82c
# ╠═4e273d1f-a00e-49ca-9f8d-a0f6930550fb
# ╟─bee4ab06-4c60-4bec-89d0-b3c9a512f7a6
# ╟─717f51fb-bd0a-4bb4-a5fc-1f1cf5d56ed8
# ╟─db6b6481-7e67-4418-b907-13b38c77bac7
# ╠═d4726239-81af-4792-8472-c680508449c6
# ╠═a0be4807-52ed-4626-8009-97a79e36e2f1
# ╠═52dec342-d3de-4a88-8acb-0aa186bcc086
# ╠═280c9363-9b49-44f1-a240-b7f205ffc56b
# ╟─0b3735c2-695c-4225-843e-16ca17aac0eb
# ╟─d7942b37-f821-494a-8f18-5f267aa3457a
# ╟─865f049f-54d0-4d21-860e-062262edcb58
# ╟─c3b730a4-d5b4-471e-bd06-30ace6e8b8fe
# ╟─98f42f60-7870-4813-b0c1-728285c25f01
# ╟─c3c63865-f538-4d93-bef3-6b9c69cb177f
# ╠═66225c05-165e-4051-bb5e-4cfba579fd5b
# ╠═14291e8d-001f-4e26-b094-addb970cf530
# ╠═bf670e51-06f8-4094-949c-ca2d02fd0d01
# ╠═416e53e4-6123-430f-b433-4334b7e85298
# ╠═edd5f8df-9abd-4254-abf6-33ae31d88a8d
# ╠═c0ff9103-2aa8-45fd-bea5-1903c53196de
# ╠═e17e70cb-2d82-4a08-9f6e-e50dcaa325e3
# ╠═357fd442-6e6e-4b14-b2d0-efd3fa775d0b
# ╟─d2001eb2-45cb-4f07-aa99-dd84996359b7
# ╠═1bca53ef-3438-4db5-9900-9fee71936a62
# ╠═50f7070d-9f6f-4025-9be3-13812c3000eb
# ╟─fa3b6d6a-a55d-4097-8ad2-7dafb5f01d8c
# ╟─dabb4656-4aed-4168-8659-c0472528c41d
# ╟─0367be06-4185-4add-a04b-f696c5a43638
# ╟─c79e7651-975b-407c-8c8c-d0c5653ec570
# ╟─3fed55d7-dbed-4fc9-8410-2633d5200db6
# ╟─663180d2-8fe1-4996-a194-d38120ae05fd
# ╟─39884e8a-bc83-4ff4-85bc-cfaccb4674f2
# ╟─c24e7391-a187-4a7e-aab7-93027f7db965
# ╠═12734842-d66f-4bfc-a9ad-01adeb2450e0
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
================================================
FILE: src/NiLang.jl
================================================
module NiLang

using Reexport
@reexport using NiLangCore
import NiLangCore: invtype

using FixedPointNumbers: Q20f43, Fixed
import NiLangCore: empty_global_stacks!, loaddata

export Fixed43
# 64-bit fixed-point number with 43 fractional bits (Q20f43 from FixedPointNumbers).
const Fixed43 = Q20f43

# NOTE: include order matters — later files extend definitions made in earlier ones.
include("utils.jl")
include("wrappers.jl")
include("vars.jl")
include("instructs.jl")
include("ulog.jl")
include("complex.jl")
include("autobcast.jl")
include("macros.jl")
include("autodiff/autodiff.jl")
include("stdlib/stdlib.jl")
include("deprecations.jl")

export AD

# Absolute, normalized path of a file given relative to the package root.
project_relative_path(xs...) = normpath(joinpath(dirname(dirname(pathof(@__MODULE__))), xs...))

end # module
================================================
FILE: src/autobcast.jl
================================================
export AutoBcast
"""
AutoBcast{T,N} <: IWrapper{T}
A vectorized variable.
"""
struct AutoBcast{T,N} <: IWrapper{T} x::Vector{T} end
AutoBcast(x::Vector{T}) where {T} = AutoBcast{T, length(x)}(x)
AutoBcast(x::AutoBcast{T,N}) where {T,N} = x # to avoid ambiguity error
AutoBcast{T,N}(x::AutoBcast{T,N}) where {T,N} = x
value(x::AutoBcast) = x.x
NiLangCore.chfield(x::AutoBcast, ::typeof(value), xval) = chfield(x, Val(:x), xval)
Base.zero(x::AutoBcast) = AutoBcast(zero(x.x))
Base.zero(::Type{AutoBcast{T,N}}) where {T,N} = AutoBcast{T,N}(zeros(T, N))
Base.length(ab::AutoBcast{T,N}) where {T, N} = N
# Lift reversible unary instructions to act elementwise on an `AutoBcast`.
for F1 in [:(Base.:-), :INC, :FLIP, :DEC]
    @eval function $F1(a!::AutoBcast)
        # `@invcheckoff` disables reversibility checks inside the hot loop
        @instr @invcheckoff @inbounds for i=1:length(a!)
            $F1(a!.x[i])
        end
        a!
    end
end
# Lift reversible binary instructions elementwise. A scalar second operand is
# broadcast to every element; no scalar method is generated for `SWAP`
# (swapping a single scalar into many slots has no elementwise meaning).
for F2 in [:SWAP, :((inf::PlusEq)), :((inf::MinusEq)), :((inf::XorEq))]
    F2 != :SWAP && @eval function $F2(a::AutoBcast, b::Real)
        @instr @invcheckoff @inbounds for i=1:length(a)
            $F2(a.x[i], b)
        end
        a, b
    end
    @eval function $F2(a::AutoBcast, b::AutoBcast)
        @instr @invcheckoff @inbounds for i=1:length(a)
            $F2(a.x[i], b.x[i])
        end
        a, b
    end
end
# Lift reversible ternary instructions elementwise; scalar operands are
# broadcast. ROT/IROT only get the (AutoBcast, AutoBcast, scalar-or-vector)
# forms, since their first two arguments must be of matching kind.
for F3 in [:ROT, :IROT, :((inf::PlusEq)), :((inf::MinusEq)), :((inf::XorEq))]
    if !(F3 in [:ROT, :IROT])
        @eval function $F3(a::AutoBcast, b::Real, c::Real)
            @instr @invcheckoff @inbounds for i=1:length(a)
                $F3(a.x[i], b, c)
            end
            a, b, c
        end
        @eval function $F3(a::AutoBcast, b::Real, c::AutoBcast)
            # NOTE(review): this loop omits `@inbounds`, unlike its siblings — verify intent.
            @instr @invcheckoff for i=1:length(a)
                $F3(a.x[i], b, c.x[i])
            end
            a, b, c
        end
    end
    @eval function $F3(a::AutoBcast, b::AutoBcast, c::Real)
        @instr @invcheckoff @inbounds for i=1:length(a)
            $F3(a.x[i], b.x[i], c)
        end
        a, b, c
    end
    @eval function $F3(a::AutoBcast, b::AutoBcast, c::AutoBcast)
        @instr @invcheckoff @inbounds for i=1:length(a)
            $F3(a.x[i], b.x[i], c.x[i])
        end
        a, b, c
    end
end
# For `identity` accumulation between two AutoBcasts, bypass the AutoBcast
# specializations generated above and use the generic PlusEq/MinusEq methods.
(f::PlusEq{typeof(identity)})(x::T, y::T) where T<:AutoBcast = invoke(f, Tuple{T,T} where T, x, y)
(f::MinusEq{typeof(identity)})(x::T, y::T) where T<:AutoBcast = invoke(f, Tuple{T,T} where T, x, y)
================================================
FILE: src/autodiff/autodiff.jl
================================================
# Automatic differentiation submodule: gradients are obtained by running a
# reversible program backward over `GVar`-wrapped values.
module AD

using ..NiLang
using NiLangCore
using MLStyle, TupleTools

import ..NiLang: ROT, IROT, SWAP,
    chfield, value, NoGrad, INC, DEC, HADAMARD,
    AddConst, SubConst, NEG, INV
using NiLangCore: default_constructor

export GVar, grad, Loss, NoGrad, @nograd

# NOTE: include order matters — gradient rules build on the GVar type in vars.jl.
include("vars.jl")
include("stack.jl")
include("gradfunc.jl")
include("checks.jl")
include("instructs.jl")
include("ulog.jl")
include("jacobian.jl")
include("hessian_backback.jl")
include("complex.jl")

end
================================================
FILE: src/autodiff/checks.jl
================================================
export check_grad, nparams
export gradient_numeric
using FixedPointNumbers: Fixed

# Gradient-checking utilities are not performance critical; skip specialization
# to reduce compile time.
@nospecialize

# A value is a differentiable "variable" iff it carries at least one parameter.
isvar(x) = nparams(x) != 0

# Count the differentiable scalar parameters in a (possibly nested) value.
# The fallback decomposes a composite struct into a tuple of its fields.
nparams(model) = nparams(NiLangCore.type2tuple(model))
nparams(x::AbstractArray{<:AbstractFloat}) = length(x)
nparams(x::AbstractArray{<:GVar}) = length(x)
nparams(x::AbstractArray) = sum(nparams, x)
nparams(x::Fixed) = 1
function nparams(x::Union{Tuple,NamedTuple})
    res = 0
    for xi in x
        res += nparams(xi)
    end
    res
end
nparams(x::AbstractFloat) = 1
nparams(x::GVar) = 1
# Return a copy of tuple `tp` with the `iloss`-th entry transformed by `vfunc`.
function tset(vfunc::Function, tp::Tuple, iloss)
    ntuple(k -> k === iloss ? vfunc(tp[k]) : tp[k], length(tp))
end
# Return a copy of tuple `tp` with the `iloss`-th entry replaced by `value`.
function tset(value, tp::Tuple, iloss)
    ntuple(k -> k === iloss ? value : tp[k], length(tp))
end
# Add `val` to the probed slot of `args`: an `i::Int` indexes into an array
# argument (mutated in place); `i === nothing` means the `iarg`-th argument is
# itself the scalar, so rebuild the tuple with its value shifted.
function update_var(args, iarg, i::Int, val)
    args[iarg][i] += val
    args
end
function update_var(args, iarg, ::Nothing, val)
    tset(x->chfield(x, value, value(x) + val), args, iarg)
end
# Central finite difference for one scalar slot: perturb by +δ/2 and -δ/2,
# reading the loss after a forward run each time. After each forward run the
# inverse program `~f` uncomputes the state, and the final update restores the
# probed slot to its original value.
function ng_single(::Type{T}, f, args, kwargs, iarg, i, iloss, δ) where T
    args = update_var(args, iarg, i, T(δ/2))
    @instr f(args...; kwargs...)
    pos = value(args[iloss])
    @instr (~f)(args...; kwargs...)
    args = update_var(args, iarg, i, -T(δ))
    @instr f(args...; kwargs...)
    neg = value(args[iloss])
    @instr (~f)(args...; kwargs...)
    # restore the original value of the probed slot
    args = update_var(args, iarg, i, T(δ/2))
    (pos - neg)/δ
end
# Complex variant: probe along the real axis (δ) and the imaginary axis (im*δ)
# separately and combine the two central differences into one complex gradient.
function ng_single(::Type{T}, f, args, kwargs, iarg, i, iloss, δ) where T<:Complex
    res = zero(T)
    for dd = [δ, im*δ]
        args = update_var(args, iarg, i, dd/2)
        @instr f(args...; kwargs...)
        pos = value(args[iloss])
        @instr (~f)(args...; kwargs...)
        args = update_var(args, iarg, i, -dd)
        @instr f(args...; kwargs...)
        neg = value(args[iloss])
        @instr (~f)(args...; kwargs...)
        # restore the probed slot
        args = update_var(args, iarg, i, dd/2)
        if dd == δ
            res += (pos - neg)/δ
        else
            res += im*(pos - neg)/δ
        end
    end
    res
end
# Numeric gradient of the loss w.r.t. the `iarg`-th argument: elementwise for
# array arguments, a single finite difference otherwise.
function ng(f, args, iarg; iloss::Int, δ=1e-5, kwargs...)
    arg = args[iarg]
    T = eltype(arg)
    if arg isa AbstractArray
        out = zero(arg)
        for k = 1:length(arg)
            out[k] = ng_single(T, f, args, kwargs, iarg, k, iloss, δ)
        end
        return out
    else
        return ng_single(T, f, args, kwargs, iarg, nothing, iloss, δ)
    end
end
"""
gradient_numeric(f, args...; iloss, kwargs...)
Numeric differentiating f(args..., kwargs...).
"""
function gradient_numeric(f, args; iloss::Int, kwargs...)
map(1:length(args)) do iarg
if isvar(args[iarg])
ng(f, args, iarg; iloss=iloss, kwargs...)
else
0
end
end
end
"""
check_grad(f, args; atol::Real=1e-8, verbose::Bool=false, iloss::Int, kwargs...)
Return true if the gradient of `f(args..., kwargs...)` is reversible.
"""
function check_grad(f, args; atol::Real=1e-4, verbose::Bool=false, iloss::Int, kwargs...)
vars = ((iarg for iarg in 1:length(args) if isvar(args[iarg]))...,)
initial_vars = deepcopy(vars)
ngs = gradient_numeric(f, args; kwargs..., iloss=iloss)
gs = gradient(f, args; kwargs..., iloss=iloss)
verbose && @show ngs
verbose && @show gs
if !all(isapprox.(ngs, gs, atol=atol))
verbose && println("gradient not match: $ngs v.s. $gs")
return false
end
if !world_similar(initial_vars, vars, atol=atol, verbose=verbose)
verbose && println("world changed during obtaining gradient.")
return false
end
return true
end
@specialize
================================================
FILE: src/autodiff/complex.jl
================================================
# Gradient rules for complex numbers whose components are GVars.
# r! += angle(x) via the two-argument arctangent.
@i @inline function :(+=)(angle)(r!::T, x::Complex{T}) where T<:GVar
    r! += atan(x.im, x.re)
end
# y! += |a|^2 = re^2 + im^2.
@i @inline function :(+=)(abs2)(y!::T, a::Complex{T}) where T<:GVar
    y! += a.re^2
    y! += a.im^2
end
# y! += |a|, staged through an abs2 ancilla that is uncomputed afterwards.
@i @inline function :(+=)(abs)(y!::T, a::Complex{T}) where T<:GVar
    @routine @invcheckoff begin
        y2 ← zero(y!)
        y2 += abs2(a)
    end
    y! += sqrt(y2)
    ~@routine
end
# zero/one for GVar-component complex numbers, built fieldwise.
Base.zero(x::Complex{T}) where T<:GVar = Complex(zero(T), zero(T))
Base.zero(::Type{Complex{T}}) where T<:GVar = Complex(zero(T), zero(T))
Base.one(x::Complex{T}) where T<:GVar = Complex(one(T), zero(T))
Base.one(::Type{Complex{T}}) where T<:GVar = Complex(one(T), zero(T))
================================================
FILE: src/autodiff/gradfunc.jl
================================================
export Grad, NGrad, Hessian, gradient
"""
NGrad{N,FT} <: Function
Obtain gradients `Grad(f)(Val(i), args..., kwargs...)`, where `i` is the index of loss in `args`. `Grad` object calls forward first, and then backward.
!!! note
`Val(1)` is specially optimized, so putting the loss as the first parameter can avoid potential overhead.
```
"""
struct NGrad{N,FT} <: Function
f::FT
end
function NGrad{N}(f::FT) where {N,FT}
NGrad{N,FT}(f)
end
const Grad{FT} = NGrad{1,FT}
const Hessian{FT} = NGrad{2,FT}
# Display an NGrad as the function name followed by N primes (e.g. `sin'`).
Base.show_function(io::IO, b::NGrad{N}, compact::Bool) where {N} = print(io, "$(b.f)"*"'"^N)
Base.show_function(io::IO, ::MIME"text/plain", b::NGrad{N}, compact::Bool) where {N} = print(io, b)
Base.display(bf::NGrad) = print(bf)
# Inverting an NGrad lowers its order; inverting to order zero unwraps to `f`.
(_::Type{Inv{NGrad{N}}})(f::NGrad{M}) where {M, N} = NGrad{M-N}(f.f)
(_::Type{Inv{NGrad{M}}})(f::NGrad{M}) where {M} = f.f
# Gradient program: run forward, wrap all arguments in GVar, seed the loss
# adjoint with one (INC on its grad field), then run the inverse program to
# accumulate input gradients.
@i function (g::Grad)(il::Val{iloss}, args...; kwargs...) where iloss
    protectf(g).f(args...; kwargs...)
    GVar.(args)
    INC(args |> tget(iloss) |> grad)
    (~protectf(g).f)(args...; kwargs...)
end
# Specialization for the loss at position 1 (avoids generic tuple indexing).
@i function (g::Grad)(il::Val{1}, x, ys...; kwargs...)
    protectf(g).f(x, ys...; kwargs...)
    GVar(x)
    INC(x |> grad)
    GVar.(ys)
    (~protectf(g).f)(x, ys...; kwargs...)
end
# Keyword-argument variant of the same forward–seed–backward pattern.
@i function (g::Grad)(args...; iloss::Int, kwargs...)
    protectf(g).f(args...; kwargs...)
    GVar.(args)
    INC(args |> tget(iloss) |> grad)
    (~protectf(g).f)(args...; kwargs...)
end
# Non-reversible convenience wrapper: run forward once, then call the inverse
# program on GVar-wrapped outputs — the loss slot is seeded with gradient one —
# and return the gradients of all arguments.
@generated function gradient(::Val{iloss}, f, args::NTuple{N,Any}; kwargs...) where {iloss,N}
    newres = gensym()
    newargs = Any[:(GVar($newres[$i])) for i=1:N]
    newargs[iloss] = :(GVar($newres[$iloss], one($newres[$iloss])))
    quote
        $newres = f(args...; kwargs...)
        grad((~f)($(newargs...); kwargs...))
    end
end
gradient(f, args; iloss::Int, kwargs...) = gradient(Val(iloss), f, args; kwargs...)
================================================
FILE: src/autodiff/hessian_backback.jl
================================================
export hessian_backback
@i function backback(f, args...; index::Int, iloss::Int, kwargs...)
# forward
Grad(f)(args...; kwargs..., iloss=iloss)
for i = 1:length(args)
GVar(args |> tget(i) |> grad)
GVar(args |> tget(i) |> value)
end
(args |> tget(index) |> grad |> grad) += 1
# backward#2
(~Grad(f))(args...; kwargs..., iloss=iloss)
end
"""
hessian_backback(f, args; iloss::Int, kwargs...)
Obtain the Hessian matrix of `f(args..., kwargs...)` by back propagating adjoint program.
"""
function hessian_backback(f, args; iloss::Int, kwargs...)
N = length(args)
hmat = zeros(N, N)
for i=1:N
if !(args[i] isa Integer || args[i] isa AbstractVector)
res = backback(f, args...; kwargs..., index=i, iloss=iloss)
hmat[:,i] .= map(x->grad(value(x)), res[2:end])
end
end
hmat
end
# Finite-difference Hessian: perturb each scalar argument by ±η/2 and take the
# central difference of the gradient vectors.
function hessian_numeric(f, args; iloss::Int, η=1e-5, kwargs...)
    narg = length(args)
    res = zeros(narg, narg)
    largs = [args...]
    for i = 1:narg
        if nparams(args[i]) == 1
            @instr (largs[i] |> value) += η/2
            gpos = gradient(f, (largs...,); iloss=iloss, kwargs...)
            @instr (largs[i] |> value) -= η
            gneg = gradient(f, (largs...,); iloss=iloss, kwargs...)
            # restore the argument before moving to the next one
            @instr (largs[i] |> value) += η/2
            res[:,i] .= (gpos .- gneg)./η
        end
    end
    return res
end
# Numeric Hessians taken with each scalar argument in turn as the loss,
# restricted to the scalar (single-parameter) arguments.
function local_hessian_numeric(f, args; kwargs...)
    n = length(args)
    hes = zeros(n, n, n)
    for k = 1:n
        if nparams(args[k]) == 1
            hes[:, :, k] .= hessian_numeric(f, args; kwargs..., iloss=k)
        end
    end
    scalar = BitArray(nparams.(args) .== 1)
    return hes[scalar, scalar, scalar]
end
================================================
FILE: src/autodiff/instructs.jl
================================================
# unary
# Negation flips both the value and the gradient (d(-x)/dx = -1).
@i @inline function NEG(a!::GVar)
    NEG(a!.x)
    NEG(a!.g)
end
# Reciprocal view: x -> 1/x transforms the gradient by dx/dy = -x^2,
# where x2 is computed from the value *before* inversion.
function INV(x!::GVar{T}) where T
    x2 = x!.x ^ 2
    GVar(INV(x!.x), -x!.g * x2)
end
# DEC shifts only the value; subtracting a constant leaves the gradient unchanged.
@i @inline function DEC(a!::GVar)
    DEC(a!.x)
end
# +-
# a! -= b: the adjoint of subtraction accumulates a!'s gradient into b's.
@i @inline function :(-=)(identity)(a!::GVar, b::GVar)
    a!.x -= b.x
    b.g += a!.g
end
# inv
# out! -= 1/y: d(1/y)/dy = -1/y^2, staged through the ancilla a1 = y^2.
@eval @i @inline function :(-=)(inv)(out!::GVar{T}, y::GVar) where T
    out!.x -= inv(y.x)
    @routine @invcheckoff begin
        @zeros T a1
        a1 += y.x ^ 2
    end
    y.g -= out!.g / a1
    ~@routine
end
# +- (triple)
# out! -= x + y: addition passes the output adjoint to both inputs unchanged.
@i @inline function :(-=)(+)(out!::GVar, x::GVar, y::GVar)
    out!.x -= x.x + y.x
    x.g += out! |> grad
    y.g += out! |> grad
end
@i @inline function :(-=)(+)(out!::GVar, x::GVar, y::Real)
    out!.x -= (x |> value) + (y |> value)
    x.g += out! |> grad
end
@i @inline function :(-=)(+)(out!::GVar, x::Real, y::GVar)
    out!.x -= (x |> value) + (y |> value)
    y.g += out! |> grad
end
# out! -= x - y: subtraction negates the adjoint flowing into y.
@i @inline function :(-=)(-)(out!::GVar, x::GVar, y::GVar)
    out!.x -= x.x - y.x
    x.g += out! |> grad
    y.g -= out! |> grad
end
@i @inline function :(-=)(-)(out!::GVar, x::Real, y::GVar)
    out!.x -= (x |> value) - y.x
    y.g -= out!.g
end
@i @inline function :(-=)(-)(out!::GVar, x::GVar, y::Real)
    out!.x -= x.x - (y |> value)
    x.g += out! |> grad
end
# NOTE: it will error on `SWAP(a!::GVar, b)` or `SWAP(a!, b:GVar)`
# Swapping exchanges values and gradients in lockstep.
@i @inline function SWAP(a!::GVar, b!::GVar)
    SWAP(a! |> value, b! |> value)
    SWAP(a!.g, b!.g)
end
# */
# out! -= x*y: product rule — each input receives the adjoint times the other input.
@i @inline function :(-=)(*)(out!::GVar, x::GVar, y::GVar)
    out!.x -= x.x * y.x
    x.g += out!.g * y.x
    y.g += x.x * out!.g
end
@i @inline function :(-=)(*)(out!::GVar, x::Real, y::GVar)
    out!.x -= (x |> value) * y.x
    y.g += (x |> value) * out!.g
end
@i @inline function :(-=)(*)(out!::GVar, x::GVar, y::Real)
    out!.x -= x.x * (y |> value)
    x.g += out!.g * (y |> value)
end
# Division: d(x/y)/dx = 1/y and d(x/y)/dy = -x/y^2, staged through ancillas
# a1 = x*g and a2 = a1/y. The same rule is generated for `÷`.
for DIV in [:/, :÷]
    @eval @i @inline function :(-=)($DIV)(out!::GVar{T}, x::GVar, y::GVar) where T
        out!.x -= $DIV(x.x, y.x)
        @routine @invcheckoff begin
            a1 ← zero(out! |> grad)
            a2 ← zero(out! |> grad)
            a1 += x.x * out!.g
            a2 += $DIV(a1, y.x)
        end
        x.g += $DIV(out!.g, y.x)
        y.g -= $DIV(a2, y.x)
        ~@routine
    end
    @eval @i @inline function :(-=)($DIV)(out!::GVar{T}, x::Real, y::GVar) where T
        out!.x -= $DIV(x, y.x)
        @routine @invcheckoff begin
            a1 ← zero(out!.g)
            a2 ← zero(out!.g)
            a1 += x * out!.g
            a2 += $DIV(a1, y.x)
        end
        y.g -= $DIV(a2, y.x)
        ~@routine
    end
    @eval @i @inline function :(-=)($DIV)(out!::GVar, x::GVar, y::Real)
        out!.x -= $DIV(x.x, y)
        x.g += $DIV(out!.g, y)
    end
end
# out! -= x^n, using d(x^n)/dx = n*x^(n-1) and d(x^n)/dn = x^n * log(x).
@i @inline function :(-=)(^)(out!::GVar{T}, x::GVar, n::GVar) where T
    # grad x
    @routine @invcheckoff begin
        @zeros T anc1 anc2 anc3 jac1 jac2 nx_1
        nx_1 += n.x - 1
        anc1 += x.x ^ nx_1
        jac1 += anc1 * n.x
        # get grad of n
        anc2 += log(x.x)
        anc3 += anc1 * x.x
        jac2 += anc3 * anc2
    end
    out!.x -= anc1 * x.x
    x.g += out!.g * jac1
    n.g += out!.g * jac2
    ~@routine
end
# Constant exponent: only x receives a gradient.
@i @inline function :(-=)(^)(out!::GVar{T}, x::GVar, n::Real) where T
    @routine @invcheckoff begin
        anc1 ← zero(x.x)
        jac ← zero(x.x)
        nx_1 ← zero(n)
        nx_1 += n - 1
        anc1 += x.x ^ nx_1
        jac += anc1 * n
    end
    out!.x -= anc1 * x.x
    x.g += out!.g * jac
    ~@routine
end
# Constant base: only n receives a gradient (jac = x^n * log x).
@i @inline function :(-=)(^)(out!::GVar{T}, x::Real, n::GVar) where T
    # get jac of n
    @routine @invcheckoff begin
        anc1 ← zero(x)
        anc2 ← zero(x)
        jac ← zero(x)
        anc1 += log(x)
        anc2 += x ^ n.x
        jac += anc1*anc2
    end
    out!.x -= anc2
    n.g += out!.g * jac
    ~@routine
end
# min/max gradients: only the selected operand receives the output adjoint.
for (OP, F) in [(:min, :<), (:max, :>)]
    @eval @i @inline function :(-=)($OP)(out!::GVar{T}, x::GVar, y::GVar) where T
        if $F(x, y)
            out!.x -= x.x
            x.g += out!.g
        else
            out!.x -= y.x
            y.g += out!.g
        end
    end
    @eval @i @inline function :(-=)($OP)(out!::GVar{T}, x::GVar, y::Real) where T
        if $F(x, y)
            out!.x -= x.x
            x.g += out!.g
        else
            # BUGFIX: `y` is a plain Real here, so it has no `.x` field (and no gradient).
            out!.x -= y
        end
    end
    @eval @i @inline function :(-=)($OP)(out!::GVar{T}, x::Real, y::GVar) where T
        if $F(x, y)
            # BUGFIX: `x` is a plain Real here, so it has no `.x` field (and no gradient).
            out!.x -= x
        else
            out!.x -= y.x
            y.g += out!.g
        end
    end
end
# out! -= atan(y, x): d/dy = x/(x^2+y^2), d/dx = -y/(x^2+y^2).
@i @inline function :(-=)(atan)(out!::GVar{T}, y::GVar, x::GVar) where T
    out!.x -= atan(y.x, x.x)
    @routine @invcheckoff begin
        @zeros T xy2 jac_x jac_y
        xy2 += abs2(x.x)
        xy2 += abs2(y.x)
        jac_y += x.x / xy2
        jac_x += (-y.x) / xy2
    end
    y.g += out!.g * jac_y
    x.g += out!.g * jac_x
    ~@routine
end
@i @inline function :(-=)(atan)(out!::GVar{T}, y::Real, x::GVar) where T
    out!.x -= atan(y, x.x)
    @routine @invcheckoff begin
        @zeros T xy2 jac_x
        xy2 += abs2(x.x)
        xy2 += abs2(y)
        jac_x += (-y) / xy2
    end
    x.g += out!.g * jac_x
    ~@routine
end
@i @inline function :(-=)(atan)(out!::GVar{T}, y::GVar, x::Real) where T
    out!.x -= atan(y.x, x)
    @routine @invcheckoff begin
        @zeros T xy2 jac_y
        xy2 += abs2(x)
        xy2 += abs2(y.x)
        jac_y += x / xy2
    end
    y.g += out!.g * jac_y
    ~@routine
end
# Unary atan: d(atan x)/dx = 1/(1+x^2), with xy2 initialized to one.
@i @inline function :(-=)(atan)(out!::GVar{T}, x::GVar) where T
    out!.x -= atan(x.x)
    @routine @invcheckoff begin
        xy2 ← one(T)
        xy2 += abs2(x.x)
    end
    x.g += out!.g / xy2
    ~@routine
end
# out! -= |x|: the adjoint passes through with the sign of x.
@i @inline function :(-=)(abs)(out!::GVar, x::GVar{T}) where T
    out!.x -= abs(x.x)
    if (x > 0, ~)
        x.g += out!.g
    else
        x.g -= out!.g
    end
end
# out! -= x^2: d/dx = 2x, accumulated as two additions of g*x.
@i @inline function :(-=)(abs2)(out!::GVar, x::GVar{T}) where T
    out!.x -= abs2(x.x)
    x.g += out!.g * x.x
    x.g += out!.g * x.x
end
# Mixed GVar/Real combinations that must not propagate gradients.
for op in [:*, :/, :^, :+, :-, :atan, :max, :min]
    @eval @nograd :(-=)($op)(out!::GVar, x::Real, y::Real)
    @eval @nograd :(-=)($op)(out!::Real, x::Real, y::GVar)
    @eval @nograd :(-=)($op)(out!::Real, x::GVar, y::GVar)
    @eval @nograd :(-=)($op)(out!::Real, x::GVar, y::Real)
end
# out! -= sqrt(x): d/dx = 1/(2*sqrt(x)); the x == 0 guard skips both updates
# to avoid dividing by zero.
@i @inline function :(-=)(sqrt)(out!::GVar, x::GVar{T}) where T
    if x.x != 0
        @routine @invcheckoff begin
            @zeros T anc1 anc2
            anc1 += sqrt(x.x)
            anc2 += 2 * anc1
        end
        out!.x -= anc1
        x.g += out!.g / anc2
        ~@routine
    end
end
# out! -= exp(x): the derivative equals the function value itself.
@i @inline function :(-=)(exp)(out!::GVar, x::GVar{T}) where T
    @routine @invcheckoff begin
        anc1 ← zero(T)
        anc1 += exp(x.x)
    end
    out!.x -= anc1
    x.g += out!.g * anc1
    ~@routine
end
# out! -= log(x): d/dx = 1/x.
@i @inline function :(-=)(log)(out!::GVar, x::GVar{T}) where T
    out!.x -= log(x.x)
    x.g += out!.g / x.x
end
# out! -= sin(x): d/dx = cos(x); sine and cosine computed jointly.
@i @inline function :(-=)(sin)(out!::GVar, x::GVar{T}) where T
    @routine @invcheckoff begin
        @zeros T s c
        (s, c) += sincos(x.x)
    end
    out!.x -= s
    x.g += out!.g * c
    ~@routine
end
# out! -= sinh(x): d/dx = cosh(x).
@i @inline function :(-=)(sinh)(out!::GVar, x::GVar{T}) where T
    out!.x -= sinh(x.x)
    @routine @invcheckoff begin
        anc1 ← zero(x.x)
        anc1 += cosh(x.x)
    end
    x.g += out!.g * anc1
    ~@routine
end
# out! -= asin(x): d/dx = 1/sqrt(1-x^2); 1-x^2 is built as AddConst(1)∘NEG of x^2.
@i @inline function (:-=)(asin)(out!::GVar, x::GVar{T}) where T
    out!.x -= asin(x.x)
    @routine @invcheckoff begin
        @zeros T sqrt_1_x2 x2
        x2 += x.x ^ 2
        sqrt_1_x2 += sqrt(x2 |> NEG |> AddConst(1))
    end
    x.g += out!.g / sqrt_1_x2
    ~@routine
end
# out! -= cos(x): d/dx = -sin(x).
@i @inline function (:-=)(cos)(out!::GVar, x::GVar{T}) where T
    @routine @invcheckoff begin
        @zeros T s c
        (s, c) += sincos(x.x)
    end
    out!.x -= c
    x.g -= out!.g * s
    ~@routine
end
# out! -= cosh(x): d/dx = sinh(x).
@i @inline function :(-=)(cosh)(out!::GVar, x::GVar{T}) where T
    out!.x -= cosh(x.x)
    @routine @invcheckoff begin
        anc1 ← zero(x.x)
        anc1 += sinh(x.x)
    end
    x.g += out!.g * anc1
    ~@routine
end
# out! -= acos(x): d/dx = -1/sqrt(1-x^2).
@i @inline function :(-=)(acos)(out!::GVar, x::GVar{T}) where T
    out!.x -= acos(x.x)
    @routine @invcheckoff begin
        @zeros T sqrt_1_x2 x2
        x2 += x.x ^ 2
        sqrt_1_x2 += sqrt(x2 |> NEG |> AddConst(1))
    end
    x.g -= out!.g / sqrt_1_x2
    ~@routine
end
# out! -= tan(x): d/dx = 1 + tan(x)^2 (anc2 starts at one).
@i @inline function :(-=)(tan)(out!::GVar, x::GVar{T}) where T
    @routine @invcheckoff begin
        anc1 ← zero(x.x)
        anc2 ← one(x.x)
        anc1 += tan(x.x)
        anc2 += anc1^2
    end
    out!.x -= anc1
    x.g += out!.g * anc2
    ~@routine
end
# out! -= tanh(x): d/dx = 1 - tanh(x)^2.
@i @inline function :(-=)(tanh)(out!::GVar, x::GVar{T}) where T
    @routine @invcheckoff begin
        anc1 ← zero(x.x)
        anc2 ← one(x.x)
        anc1 += tanh(x.x)
        anc2 -= anc1^2
    end
    out!.x -= anc1
    x.g += out!.g * anc2
    ~@routine
end
# out! -= sincos(x): the tuple output receives (sin, cos); x's gradient picks up
# +cos(x) from the first slot's adjoint and -sin(x) from the second.
@i @inline function :(-=)(sincos)(out!::Tuple{T1,T1}, x::GVar{T}) where {T1<:GVar, T}
    @routine @invcheckoff begin
        s ← zero(T)
        c ← zero(T)
        (s, c) += sincos(x.x)
    end
    (out! .|> value) -= (s, c)
    x.g += (out!.:1 |> grad) * c
    x.g -= (out!.:2 |> grad) * s
    ~@routine
end
# Unary mixed GVar/Real combinations with no gradient flow.
for op in [:sqrt, :exp, :log, :sin, :cos, :tanh, :abs, :abs2, :identity, :inv]
    @eval @nograd :(-=)($op)(out!::Real, x::GVar)
    @eval @nograd :(-=)($op)(out!::GVar, x::Real)
end
@nograd :(-=)(sincos)(out!::Tuple{<:Real,<:Real}, x::GVar)
@nograd :(-=)(sincos)(out!::Tuple{<:GVar,<:GVar}, x::Real)
# Inverse rotation with gradients: the gradient pair is rotated through a
# temporarily shifted angle, and θ accumulates a!·g-type terms for its adjoint.
@i @inline function IROT(a!::GVar, b!::GVar, θ::GVar)
    IROT(a!.x, b!.x, θ.x)
    NEG(θ |> value)
    θ.x -= π/2
    ROT(a!.g, b!.g, θ.x)
    θ.g += a!.x * a!.g
    θ.g += b!.x * b!.g
    θ.x += π/2
    NEG(θ |> value)
    ROT(a!.g, b!.g, π/2)
end
# Same rotation of the gradient pair, but a plain Real angle gets no adjoint.
@i @inline function IROT(a!::GVar, b!::GVar, θ::Real)
    IROT(a!.x, b!.x, θ)
    NEG(θ)
    θ -= π/2
    ROT(a!.g, b!.g, θ)
    θ += π/2
    NEG(θ)
    ROT(a!.g, b!.g, π/2)
end
@nograd IROT(a!::Real, b!::Real, θ::GVar)
export primitive_grad
# Hook for user-defined primitives: methods of `primitive_grad` supply the
# Jacobian used by the generic MinusEq rules below.
function primitive_grad end
# Generic fallback: subtract f(values) and scatter g * primitive_grad into args.
@i @inline function (mf::MinusEq)(out!::GVar, args...; kwargs...)
    out!.x -= mf.f((args .|> value)...; kwargs...)
    (args .|> grad) .+= (@skip! ntuple(x->out!.g, length(args))) .* (@skip! primitive_grad(mf.f, (args .|> value)...; kwargs...)) # unsafe statement, error on recursive gradient
end
@i @inline function (mf::MinusEq)(out!::GVar, x::GVar; kwargs...)
    out!.x -= mf.f(x |> value; kwargs...)
    x.g += (@skip! out!.g) * (@skip! primitive_grad(mf.f, x.x; kwargs...)) # unsafe statement
end
# Conversion passes the adjoint back through `convert`.
# NOTE(review): `convert(y.x)` / `convert(out!.g)` are single-argument calls —
# verify a one-argument `convert` method exists in this context.
@i @inline function :(-=)(convert)(out!::GVar{Tx, Tg}, y::GVar) where {Tx, Tg}
    out!.x -= convert(y.x)
    y.g += convert(out!.g)
end
# Hadamard transform acts independently on the value and gradient fields.
@i @inline function HADAMARD(x::GVar, y::GVar)
    HADAMARD(x.x, y.x)
    HADAMARD(x.g, y.g)
end
# Adding a constant shifts only the value; d(x+c)/dx = 1 leaves the gradient alone.
@i @inline function (f::AddConst)(y::GVar)
    y.x += f.x
end
# more data views
# `chfield` through an Add/SubConst view stores the value with the shift undone
# (NOP is the inverse of the view's operation); the gradient is untouched.
# BUGFIX: the loop previously destructured 3 names from a malformed 4-tuple
# `(:AddConst, :+, :-, :add)` and never used the middle entry; only the inverse
# op is needed. Two superseded commented-out chfield patches were removed.
for (DT, NOP) in [(:AddConst, :-), (:SubConst, :+)]
    @eval chfield(x::GVar, ac::$DT, xval::GVar) = GVar($NOP(xval.x, ac.x), xval.g)
end
# Writing through a unary-instruction view applies the inverse instruction.
for F in [:INV, :NEG, :FLIP, :INC, :DEC]
    @eval NiLangCore.chfield(x::T, ::typeof($F), xval::T) where T<:GVar = (~$F)(xval)
end
================================================
FILE: src/autodiff/jacobian.jl
================================================
export jacobian, jacobian_repeat
# Pack a single-argument call's result into a 1-tuple so callers can index uniformly.
function wrap_tuple(x, args)
    return length(args) == 1 ? (x,) : x
end
"""
jacobian_repeat(f, args...; iin::Int, iout::Int=iin, kwargs...)
Get the Jacobian matrix for function `f(args..., kwargs...)` using repeated computing gradients for each output.
One can use key word arguments `iin` and `iout` to specify the input and output tensor.
"""
function jacobian_repeat(f, args...; iin::Int, iout::Int=iin, kwargs...)
_check_input(args, iin, iout)
N = length(args[iout])
res = zeros(eltype(args[iin]), length(args[iin]), N)
xargs = wrap_tuple(f(args...; kwargs...), args)
for i = 1:N
gxargs = GVar.(xargs)
@inbounds gxargs[iout][i] = GVar(value(gxargs[iout][i]), one(eltype(xargs[iout])))
@inbounds res[:,i] .= vec(grad.(wrap_tuple((~f)(gxargs...; kwargs...), gxargs)[iin]))
end
return res
end
# Shallow-copy protection: arrays are copied, every other value passes through.
_copy(v) = v
_copy(arr::AbstractArray) = copy(arr)
"""
jacobian(f, args...; iin::Int, iout::Int=iin, kwargs...)
Get the Jacobian matrix for function `f(args..., kwargs...)` using vectorized variables in the gradient field.
One can use key word arguments `iin` and `iout` to specify the input and output tensor.
"""
function jacobian(f, args...; iin::Int, iout::Int=iin, kwargs...)
_check_input(args, iin, iout)
args = wrap_tuple(f(args...; kwargs...), args)
ABT = AutoBcast{eltype(args[iout]), length(args[iout])}
_args = map(i-> i==iout ? wrap_jacobian(ABT, args[i]) : wrap_bcastgrad(ABT, args[i]), 1:length(args))
_args = wrap_tuple((~f)(_args...; kwargs...), args)
out = zeros(eltype(args[iin]), length(args[iin]), length(args[iout]))
for i=1:length(args[iin])
@inbounds out[i,:] .= grad(_args[iin][i]).x
end
out
end
# Seed the output array with one-hot vectorized adjoints: the k-th element's
# gradient slot is the k-th basis vector, so one backward run yields all columns.
function wrap_jacobian(::Type{AutoBcast{T,N}}, outarray::AbstractArray{T}) where {T,N}
    map(k->GVar(outarray[k], AutoBcast{T,N}(onehot(T, N, k))), LinearIndices(outarray))
end
# Default: attach a zeroed vectorized gradient to a differentiable value.
function wrap_bcastgrad(::Type{AutoBcast{T,N}}, x::XT) where {T,N,XT}
    GVar(x, zero(AutoBcast{XT,N}))
end
# Integers and functions carry no gradient.
function wrap_bcastgrad(::Type{AutoBcast{T,N}}, x::Union{Integer, Function}) where {T,N}
    x
end
# NoGrad markers are unwrapped instead of wrapped.
function wrap_bcastgrad(::Type{AutoBcast{T,N}}, x::NoGrad) where {T,N}
    (~NoGrad)(x)
end
# Containers are wrapped elementwise.
function wrap_bcastgrad(::Type{AutoBcast{T,N}}, x::Union{Tuple,AbstractArray}) where {T,N}
    wrap_bcastgrad.(AutoBcast{T,N}, x)
end
# Standard basis vector: a length-`N` `Vector{T}` of zeros with a one at index `k`.
function onehot(::Type{T}, N::Int, k::Int) where T
    return T[i == k ? one(T) : zero(T) for i in 1:N]
end
# Validate Jacobian arguments: both the input and output slots must hold arrays
# of the same element type; otherwise raise an ArgumentError.
function _check_input(args, iin, iout)
    input, output = args[iin], args[iout]
    if !(input isa AbstractArray && output isa AbstractArray)
        throw(ArgumentError("argument at position $iin and $iout are not arrays."))
    elseif eltype(input) != eltype(output)
        throw(ArgumentError("argument at position $iin and $iout do not have the same type."))
    end
end
================================================
FILE: src/autodiff/stack.jl
================================================
# This is a patch for loading a data to GVar correctly.
import NiLangCore
# Load a plain value into a GVar slot by conversion (gradient starts at zero).
NiLangCore.loaddata(::Type{GT}, x::T) where {T, GT<:GVar{T}} = convert(GT, x)
# Load a vector into a typed vector slot by elementwise conversion.
function NiLangCore.loaddata(t::Type{VT}, x::AbstractVector) where {T, VT<:AbstractVector{T}}
    convert.(T, x)
end
# Load using a template *instance* `t`: recurse elementwise, then convert the container.
function NiLangCore.loaddata(t::VT, x::AbstractVector) where {T, VT<:AbstractVector{T}}
    convert(VT, NiLangCore.loaddata.(t, x))
end
# Load tuple data elementwise: rebuild a tuple whose i-th entry is `x[i]`
# loaded into the i-th declared element type of `T`.
function NiLangCore.loaddata(::Type{T}, x::XT) where {N, T<:Tuple{N}, XT<:Tuple{N}}
    # BUGFIX: the previous body `ntuple(i=>NiLangCore.loaddata.(T.parameters[i], [i]), N)`
    # passed a Pair (`=>`) where `ntuple` needs a function, broadcast over a fresh
    # `[i]` instead of indexing `x`, and used the type parameter `N` as a length.
    ntuple(i -> NiLangCore.loaddata(T.parameters[i], x[i]), length(x))
end
================================================
FILE: src/autodiff/ulog.jl
================================================
# Gradient rule for gaussian_log; the adjoint used is 1/(1 + exp(-x))
# — assumes gaussian_log(x) is the log1p(exp(x))-style function; TODO confirm
# against its definition in src/ulog.jl.
@i function (:-=)(gaussian_log)(y!::GVar{T}, x::GVar{T}) where T
    y!.x -= gaussian_log(x.x)
    @routine @invcheckoff begin
        exp_x ← zero(x)
        # NOTE(review): `jac` is allocated but never used below — dead ancilla?
        jac ← zero(x)
        exp_x += exp(-x)
    end
    x.g += y!.g * (exp_x |> AddConst(1) |> INV)
    ~@routine
end
# Gradient rule for gaussian_nlog; adjoint -1/(exp(-x) - 1).
@i function (:-=)(gaussian_nlog)(y!::GVar{T}, x::GVar{T}) where T
    y!.x -= gaussian_nlog(x.x)
    @routine @invcheckoff begin
        exp_x ← zero(x)
        exp_x += exp(-x)
    end
    x.g -= y!.g * (exp_x |> SubConst(1) |> INV)
    ~@routine
end
# Converting a log-domain number into a GVar slot subtracts exp(log-value).
@i function :(-=)(convert)(out!::GVar{Tx, Tg}, y::ULogarithmic) where {Tx, Tg}
    out! -= exp(y.log)
end
================================================
FILE: src/autodiff/vars.jl
================================================
######## GVar, a bundle that records gradient
"""
    GVar{T,GT} <: IWrapper{T}
    GVar(x)

Add gradient information to variable `x`, where `x` can be a real number or a general structure.
If it is a non-integer real number, it will wrap the element with a gradient field,
otherwise it will propagate into the type and wrap the elements with `GVar`.
Runing a program backward will update the gradient fields of `GVar`s. The following is a toy using case.

### Example
```jldoctest; setup=:(using NiLang)
julia> using NiLang.AD: GVar, grad

julia> struct A{T}
x::T
end

julia> GVar(A(2.0+3im), A(3.0+3im))
A{Complex{GVar{Float64, Float64}}}(GVar(2.0, 3.0) + GVar(3.0, 3.0)*im)

julia> @i function f(a::A, b::A)
a.x += log(b.x)
end

julia> outputs = f(A(2.0+3im), A(2.0-1im)) # forward pass
(A{ComplexF64}(2.8047189562170503 + 2.536352390999194im), A{ComplexF64}(2.0 - 1.0im))

julia> outputs_with_gradients = (GVar(outputs[1], A(3.0+3im)), GVar(outputs[2])) # wrap `GVar`
(A{Complex{GVar{Float64, Float64}}}(GVar(2.8047189562170503, 3.0) + GVar(2.536352390999194, 3.0)*im), A{Complex{GVar{Float64, Float64}}}(GVar(2.0, 0.0) - GVar(1.0, -0.0)*im))

julia> inputs_with_gradients = (~f)(outputs_with_gradients...) # backward pass
(A{Complex{GVar{Float64, Float64}}}(GVar(2.0, 3.0) + GVar(3.0, 3.0)*im), A{Complex{GVar{Float64, Float64}}}(GVar(2.0, 1.8) - GVar(1.0, -0.6000000000000002)*im))

julia> grad(inputs_with_gradients)
(A{ComplexF64}(3.0 + 3.0im), A{ComplexF64}(1.8 + 0.6000000000000002im))
```

The outputs of `~f` are gradients for input variables, one can use `grad` to take the gradient fields recursively.
"""
struct GVar{T,GT} <: IWrapper{T}
    x::T   # the value
    g::GT  # the accumulated gradient (adjoint) of `x`
    # fully-typed constructor
    function GVar{T,GT}(x::T, g::GT) where {T,GT}
        new{T,GT}(x, g)
    end
    # homogeneous real pair: value and gradient share one type
    function GVar(x::T, g::T) where T<:Real
        new{T,T}(x, g)
    end
    # value only: convert `x` to `T` and start the gradient at zero
    function GVar{T,GT}(x::T2) where {T,T2,GT}
        new{T,GT}(T(x), zero(GT))
    end
    # heterogeneous pair (e.g. AutoBcast gradients)
    function GVar(x::T, g::GT) where {T,GT}
        new{T,GT}(x, g)
    end
end
# `GVar` and `~GVar` on composite types
# Map a composite *type* to its GVar-wrapped counterpart by wrapping each type
# parameter recursively; parameter-free types map to themselves.
@generated function GVar(x::Type{T}) where T
    ps = GVar.(T.parameters)
    if length(ps) == 0
        :($(getfield(T.name.module, nameof(T))))
    else
        :($(getfield(T.name.module, nameof(T))){$(ps...)})
    end
end
@generated function GVar(x::Type{T}, y::Type{T}) where T
    :($(getfield(T.name.module, nameof(T))){$(GVar.(T.parameters, T.parameters)...)})
end
# Inverse: strip GVar from every type parameter.
@generated function (_::Type{Inv{GVar}})(x::Type{T}) where T
    :($(getfield(T.name.module, nameof(T))){$((~GVar).(T.parameters)...)})
end
# `GVar` and `~GVar` on composite vars
# Wrap/unwrap every field of a composite value, constructing the transformed
# struct directly with `Expr(:new, ...)` to bypass user constructors.
@generated function GVar(x::T) where T
    Expr(:new, GVar(T), [:(GVar(x.$NAME)) for NAME in fieldnames(T)]...)
end
@generated function GVar(x::T, g::T) where T
    Expr(:new, GVar(T, T), [:(GVar(x.$NAME, g.$NAME)) for NAME in fieldnames(T)]...)
end
@generated function (_::Type{Inv{GVar}})(x::T) where T
    Expr(:new, (~GVar)(T), [:((~GVar)(x.$NAME)) for NAME in fieldnames(T)]...)
end
for T in [:Real]
    ## differentiable elementary types
    @eval GVar(::Type{ET}) where ET<:$T = GVar{ET,ET}
    @eval GVar(::Type{ET}, ::Type{ET}) where ET<:$T = GVar{ET,ET}
    @eval (_::Type{Inv{GVar}})(::Type{GVar{ET,GT}}) where {ET<:$T,GT} = ET
    ## differentiable elementary vars
    @eval GVar(x::$T) = GVar(x, zero(x))
    @eval @inline function (_::Type{Inv{GVar}})(x::GVar{<:$T})
        # unwrapping requires the gradient to have been cleared back to zero
        @invcheck x.g zero(x.x)
        x.x
    end
end
for T in [:Integer, :Bool, :Function, :String, :Char, :Nothing]
    ## non-differentiable elementary types
    @eval GVar(::Type{ET}) where ET<:$T = ET
    # NOTE(review): the two-argument type form still wraps (matching the
    # `GVar(x::T, g::T) where T<:Real` inner constructor, which also accepts
    # Integer pairs) — confirm this asymmetry with the one-argument form is intended.
    @eval GVar(::Type{ET}, ::Type{ET}) where ET<:$T = GVar{ET,ET}
    @eval (_::Type{Inv{GVar}})(::Type{ET}) where ET<:$T = ET
    ## non-differentiable elementary vars
    @eval GVar(x::$T) = x
    @eval (_::Type{Inv{GVar}})(x::$T) = x
end
for T in [:Tuple, :AbstractArray]
    ## broadcastable elementary types
    @eval GVar(x::$T) = GVar.(x)
    @eval GVar(x::$T, y::$T) = GVar.(x, y)
    @eval (_::Type{Inv{GVar}})(x::$T) = (~GVar).(x)
end
# no gradient wrapper
GVar(x::NoGrad) = (~NoGrad)(x)
# define on complex numbers to fix ambiguity errors
GVar(x::Complex) = Complex(GVar(x.re), GVar(x.im))
GVar(x::Complex, y::Complex) = Complex(GVar(x.re, y.re), GVar(x.im, y.im))
(_::Type{Inv{GVar}})(x::Complex) = Complex((~GVar)(x.re), (~GVar)(x.im))

# standard numeric interface lifted fieldwise to (value, gradient) pairs
Base.copy(b::GVar) = GVar(b.x, copy(b.g))
Base.zero(x::GVar) = GVar(Base.zero(x.x), Base.zero(x.g))
Base.zero(::Type{<:GVar{T,GT}}) where {T,GT} = GVar(zero(T), zero(GT))
Base.one(x::GVar) = GVar(Base.one(x.x), Base.zero(x.g))
Base.one(::Type{<:GVar{T}}) where T = GVar(one(T))
Base.adjoint(b::GVar) = GVar(b.x', b.g')
Base.:-(b::GVar) = GVar(-b.x, -b.g)
Base.isapprox(x::GVar, y::GVar; kwargs...) = isapprox(x.x, y.x; kwargs...) && isapprox(x.g, y.g; kwargs...)
# define kernel and field views
"""
    grad(var)

Get the gradient field of `var`.
"""
@fieldview grad(gv::GVar) = gv.g
@fieldview value(gv::GVar) = gv.x
# TODO: fix the problem causing this patch, the field type can not change?!
chfield(x::GVar, ::typeof(value), xval::GVar) = GVar(xval, x.g)
# Recursively collect gradients from a composite value, rebuilding it with the
# gradient-typed counterpart produced by `typegrad`.
@generated function grad(x::T) where T
    isprimitivetype(T) && throw("not supported type to obtain gradients: $T.")
    Expr(:new, typegrad(T), [:(grad(x.$NAME)) for NAME in fieldnames(T)]...)
end
# `typegrad` maps a type to the type of its gradient: GVar{ET,GT} collapses to
# ET, other types recurse into their parameters, primitives stay as-is.
typegrad(x) = x
@generated function typegrad(x::Type{T}) where T
    if isprimitivetype(T)
        T
    else
        ps = typegrad.(T.parameters)
        if length(ps) == 0
            :($(getfield(T.name.module, nameof(T))))
        else
            :($(getfield(T.name.module, nameof(T))){$(ps...)})
        end
    end
end
typegrad(::Type{GVar{ET,GT}}) where {ET,GT} = ET
# Gradients of unwrapped leaves: zero for reals, neutral values otherwise.
grad(gv::T) where T<:Real = zero(T)
grad(gv::AbstractArray{T}) where T = grad.(gv)
grad(gv::Function) = 0
grad(gv::String) = ""
grad(t::Tuple) = grad.(t)
# Writing a gradient into a non-GVar requires it to be (checkably) zero.
chfield(x::T, ::typeof(grad), g::T) where T = (@invcheck g zero(g); x)
chfield(x::GVar, ::typeof(grad), g::GVar) = GVar(x.x, g)
#chfield(x::GVar, ::typeof(-), val::GVar) = GVar(-val.x, -val.g)
chfield(x::Complex{<:GVar}, ::typeof(grad), g::Complex) = Complex(GVar(value(x.re), g.re), GVar(value(x.im), g.im))
# NOTE: superwarning: check value only to make ancilla gradient descardable.
NiLangCore.deanc(x::GVar{T}, val::GVar{T}) where T = NiLangCore.deanc(value(x), value(val))
# Arrays are deallocation-checked elementwise unless they are the same object.
# BUGFIX: the array method previously defined an *unqualified* `deanc`, which
# shadows `NiLangCore.deanc` inside this module instead of extending it, and its
# recursive `deanc.(x, val)` would hit the shadow function (which has no scalar
# method). Qualify both the definition and the recursive call.
function NiLangCore.deanc(x::T, val::T) where {T<:AbstractArray}
    x === val || NiLangCore.deanc.(x, val)
end
# constructors and deconstructors
Base.iszero(x::GVar) = iszero(x.x)
## variable mapping
# Unwrapping a doubly-wrapped GVar only peels the outer `x` field.
function (_::Type{Inv{GVar}})(x::GVar{<:GVar,<:GVar})
    Partial{:x}(x)
end
Base.show(io::IO, gv::GVar) = print(io, "GVar($(gv.x), $(gv.g))")
Base.show(io::IO, ::MIME"plain/text", gv::GVar) = Base.show(io, gv)
# used in log number iszero function.
Base.isfinite(x::GVar) = isfinite(x.x)
# interfaces
# Rewrite quoted `:op=(f)` call heads into their functor equivalents (PlusEq etc.).
_replace_opmx_callable(ex) = @match ex begin
    :(:+=($f)) => :(PlusEq($f))
    :(:-=($f)) => :(MinusEq($f))
    :(:*=($f)) => :(MulEq($f))
    :(:/=($f)) => :(DivEq($f))
    :(:⊻=($f)) => :(XorEq($f))
    _ => ex
end
"""
@nograd f(args...)
Mark `f(args...)` as having no gradients.
"""
macro nograd(ex)
@match ex begin
:($f($(args...))) => begin
f2 = _replace_opmx_callable(f)
newargs = []
for arg in args
push!(newargs, @match arg begin
:($x::GVar) => :($x.x)
:($x::VecGVar) => :($x.x)
:($x::GVar{$tp}) => :($x.x)
_ => NiLangCore.get_argname(arg)
end
)
end
esc(quote
@i function $f($(args...))
$f2($(newargs...))
end
end)
end
_ => error("expect `f(args...)`, got $ex")
end
end
# ULogarithmic
_content(x::ULogarithmic) = x.log
# Differentiate a log-domain number through its exponent field.
NiLang.AD.GVar(x::ULogarithmic) = exp(ULogarithmic, GVar(_content(x), zero(_content(x))))
(_::Type{Inv{GVar}})(x::ULogarithmic{GVar{TE}}) where TE = exp(ULogarithmic{TE}, (~GVar)(_content(x)))
# one = exp(GVar(0)); gradients start at zero.
Base.one(x::ULogarithmic{GVar{T,GT}}) where {T, GT} = one(ULogarithmic{GVar{T,GT}})
Base.one(::Type{ULogarithmic{GVar{T,GT}}}) where {T,GT} = exp(ULogarithmic, GVar(zero(T), zero(GT)))
Base.zero(x::ULogarithmic{GVar{T,GT}}) where {T,GT} = zero(ULogarithmic{GVar{T,GT}})
Base.zero(::Type{ULogarithmic{GVar{T,T}}}) where T = exp(ULogarithmic, GVar(zero(T), zero(T)))
# the patch for dicts: wrap, unwrap and read gradients valuewise (keys untouched)
GVar(d::Dict) = Dict(k => GVar(v) for (k, v) in d)
(_::Type{Inv{GVar}})(d::Dict) = Dict(k => (~GVar)(v) for (k, v) in d)
grad(d::Dict) = Dict(k => grad(v) for (k, v) in d)
================================================
FILE: src/complex.jl
================================================
export CONJ
# Let the real/imaginary parts of a complex number act as reversible l-values.
NiLangCore.chfield(x::Complex, ::typeof(real), r) = chfield(x, Val{:re}(), r)
NiLangCore.chfield(x::Complex, ::typeof(imag), r) = chfield(x, Val{:im}(), r)
# Reversible negation: negate both components.
@i @inline function NEG(y!::Complex)
    NEG(y!.re)
    NEG(y!.im)
end
# Complex conjugation: negate the imaginary part only (self-inverse).
@i @inline function CONJ(y!::Complex{T}) where T
    NEG(y!.im)
end
# r! += angle(x): accumulate the phase of `x`.
@i @inline function :(+=)(angle)(r!::Real, x::Complex)
    r! += atan(x.im, x.re)
end
# y! += a: component-wise accumulation.
@i @inline function :(+=)(identity)(y!::Complex, a::Complex)
    y!.re += a.re
    y!.im += a.im
end
# Swap two complex registers by returning them in exchanged order.
@inline function SWAP(a!::Complex, b!::Complex)
    b!, a!
end
# y! += abs2(a) = a.re^2 + a.im^2.
@i @inline function :(+=)(abs2)(y!::Real, a::Complex)
    y! += a.re^2
    y! += a.im^2
end
# y! += abs(a): compute |a|^2 into an ancilla, add its square root, then
# uncompute the ancilla via `~@routine`.
@i @inline function :(+=)(abs)(y!::Real, a::Complex)
    @routine @invcheckoff begin
        y2 ← zero(y!)
        y2 += abs2(a)
    end
    y! += sqrt(y2)
    ~@routine
end
# y! += a * b for complex operands: expand the four real products into
# ancillae, combine into the target, then uncompute.
@i @inline function :(+=)(*)(y!::Complex{T}, a::Complex, b::Complex) where T
    @routine @invcheckoff begin
        @zeros T rere imim reim imre
        rere += a.re * b.re
        imim += a.im * b.im
        reim += a.re * b.im
        imre += a.im * b.re
    end
    y!.re += rere - imim
    y!.im += reim + imre
    ~@routine
end
# Mixed real/complex products act component-wise.
@i @inline function :(+=)(*)(y!::Complex, a::Real, b::Complex)
    y!.re += a * b.re
    y!.im += a * b.im
end
@i @inline function :(+=)(*)(y!::Complex, a::Complex, b::Real)
    y!.re += a.re * b
    y!.im += a.im * b
end
# Complex ± complex is component-wise; with one real operand only the real
# part is affected.
for OP in [:+, :-]
    @eval @i @inline function :(+=)($OP)(y!::Complex, a::Complex, b::Complex)
        y!.re += $OP(a.re, b.re)
        y!.im += $OP(a.im, b.im)
    end
    @eval @i @inline function :(+=)($OP)(y!::Complex, a::Complex, b::Real)
        y!.re += $OP(a.re, b)
    end
    @eval @i @inline function :(+=)($OP)(y!::Complex, a::Real, b::Complex)
        y!.re += $OP(a, b.re)
    end
end
# y! += a / b computed as a * conj(b) / |b|^2; the in-place `CONJ(b)` is undone
# by the uncomputation step.
@i @inline function :(+=)(/)(y!::Complex, a::Complex, b::Complex{T}) where T
    @routine @invcheckoff begin
        b2 ← zero(T)
        ab ← zero(y!)
        b2 += abs2(b)
        CONJ(b)
        ab += a * b
    end
    y! += ab / b2
    ~@routine
end
# Division by a real scalar is component-wise.
@i @inline function :(+=)(/)(y!::Complex, a::Complex, b::Real)
    y!.re += a.re / b
    y!.im += a.im / b
end
# Real numerator: same conjugate trick as the complex/complex case.
@i @inline function :(+=)(/)(y!::Complex, a::Real, b::Complex{T}) where T
    @routine @invcheckoff begin
        b2 ← zero(T)
        ab ← zero(y!)
        b2 += abs2(b)
        CONJ(b)
        ab += a * b
    end
    y! += ab / b2
    ~@routine
end
# y! += inv(b) = conj(b) / |b|^2.
@i @inline function :(+=)(inv)(y!::Complex, b::Complex{T}) where T
    @routine @invcheckoff begin
        b2 ← zero(real(T))
        b2 += abs2(b)
    end
    y! += b' / b2
    ~@routine
end
# y! += exp(x) = exp(x.re) * (cos(x.im) + im sin(x.im)); the sincos results are
# moved into the ancilla `z` with SWAPs.
@i @inline function :(+=)(exp)(y!::Complex, x::Complex{T}) where T
    @routine @invcheckoff begin
        @zeros T s c expn
        z ← zero(y!)
        (s, c) += sincos(x.im)
        SWAP(z.re, c)
        SWAP(z.im, s)
        expn += exp(x.re)
    end
    y! += expn * z
    ~@routine
end
# y! += log(x) = log|x| + im * angle(x).
@i @inline function :(+=)(log)(y!::Complex, x::Complex{T}) where T
    @routine @invcheckoff begin
        n ← zero(T)
        n += abs(x)
    end
    y!.re += log(n)
    y!.im += angle(x)
    ~@routine
end
# y! += a^b (real exponent) in polar form: |a|^b * exp(im * b * angle(a)).
@i @inline function :(+=)(^)(y!::Complex, a::Complex{T}, b::Real) where T
    @routine @invcheckoff begin
        @zeros T r θ s c absy bθ
        r += abs(a)
        θ += angle(a)
        bθ += θ * b
        (s, c) += sincos(bθ)
        absy += r ^ b
    end
    y!.re += absy * c
    y!.im += absy * s
    ~@routine
end
# y! += complex(a, b).
@i @inline function :(+=)(complex)(y!::Complex, a::Real, b::Real)
    y!.re += a
    y!.im += b
end
# Real binary operations accumulated into the real part of a complex target.
for OP in [:*, :/, :+, :-, :^]
    @eval @i @inline function :(+=)($OP)(y!::Complex, a::Real, b::Real)
        y!.re += $OP(a, b)
    end
end
# Real unary functions accumulated into the real part of a complex target.
for OP in [:identity, :cos, :sin, :log, :exp]
    @eval @i @inline function :(+=)($OP)(y!::Complex, a::Real)
        y!.re += $OP(a)
    end
end
# Hadamard transform applied independently to real and imaginary parts.
@i @inline function HADAMARD(x::Complex, y::Complex)
    HADAMARD(x.re, y.re)
    HADAMARD(x.im, y.im)
end
================================================
FILE: src/deprecations.jl
================================================
# Renamed/retired APIs kept as deprecations for backward compatibility.
@deprecate simple_hessian hessian_backback
@deprecate hessian_repeat hessian_backback
@deprecate ngradient gradient_numeric
@deprecate nhessian hessian_numeric
@deprecate NEG Base.:-
@deprecate ipush! PUSH!
@deprecate ipop! POP!
================================================
FILE: src/instructs.jl
================================================
export SWAP, FLIP
export ROT, IROT
export INC, DEC, NEG, INV, AddConst, SubConst
export HADAMARD
export PUSH!, POP!, COPYPOP!, COPYPUSH!
"""
    NoGrad{T} <: IWrapper{T}
    NoGrad(x)

A `NoGrad(x)` is equivalent to `GVar^{-1}(x)`, which cancels the `GVar` wrapper.
"""
struct NoGrad{T} <: IWrapper{T}
    x::T
end
NoGrad(x::NoGrad{T}) where T = x # to avoid ambiguity error
NoGrad{T}(x::NoGrad{T}) where T = x # to avoid ambiguity error
# The inverse of `NoGrad` simply unwraps the stored value.
(_::Type{Inv{NoGrad}})(x) = x.x
@fieldview value(x::NoGrad) = x.x
# Wrapper types whose gradient is intentionally ignored.
const NullType{T} = Union{NoGrad{T}, Partial{T}}
# Self-inverse instructions: negation, multiplicative inverse and bit flip.
NEG(a!) = -(a!)
@selfdual NEG
@selfdual -
INV(a!) = inv(a!)
@selfdual INV
@inline FLIP(b::Bool) = !b
@selfdual FLIP
"""
    INC(a!) -> a! + 1
"""
@inline function INC(a!::Number)
    a! + one(a!)
end
"""
    DEC(a!) -> a! - 1
"""
@inline function DEC(a!::Number)
    a! - one(a!)
end
# `INC` and `DEC` are each other's inverses.
@dual INC DEC
"""
    SWAP(a!, b!) -> b!, a!
"""
@inline function SWAP(a!::T, b!::T) where T
    b!, a!
end
@selfdual SWAP
"""
    ROT(a!, b!, θ) -> a!', b!', θ

```math
\\begin{align}
{\\rm ROT}(a!, b!, \\theta) = \\begin{bmatrix}
\\cos(\\theta) & - \\sin(\\theta)\\\\
\\sin(\\theta)  & \\cos(\\theta)
\\end{bmatrix}
\\begin{bmatrix}
a!\\\\
b!
\\end{bmatrix},
\\end{align}
```
"""
@inline function ROT(i::Real, j::Real, θ::Real)
    a, b = rot(i, j, θ)
    a, b, θ
end
"""
    IROT(a!, b!, θ) -> ROT(a!, b!, -θ)
"""
@inline function IROT(i::Real, j::Real, θ::Real)
    i, j, _ = ROT(i, j, -θ)
    i, j, θ
end
# Rotating by θ then -θ restores the inputs, so `ROT`/`IROT` form a dual pair.
@dual ROT IROT
"""
    HADAMARD(x::Real, y::Real)

Reversible Hadamard transformation: return `((x + y)/√2, (x - y)/√2)`.
"""
function HADAMARD(x::Real, y::Real)
    scale = sqrt(0.5)
    return scale * (x + y), scale * (x - y)
end
@selfdual HADAMARD
# more data views
# `AddConst(c)`/`SubConst(c)` are callables adding/subtracting a fixed constant;
# `chfield` applies the opposite operation so they act as reversible views.
for (DT, OP, NOP) in [(:AddConst, :+, :-), (:SubConst, :-, :+)]
    @eval struct $DT{T}
        x::T
    end
    @eval function (f::$DT)(y::Real)
        $OP(y, f.x)
    end
    @eval NiLangCore.chfield(x::T, ac::$DT, xval::T) where T<:Real = $NOP(xval, ac.x)
end
# Forward unary instructions to the wrapped value of a `NullType` wrapper.
for F1 in [:(Base.:-), :NEG, :(ac::AddConst), :(sc::SubConst)]
    @eval @inline function $F1(a!::NullType)
        @instr $F1(a! |> value)
        a!
    end
end
# Fast paths for `+=(identity)` / `-=(identity)`: primitive types use a direct
# call; composite types are updated field-by-field via a generated expression.
for (OP, F, f) in [(:(PlusEq{typeof(identity)}), :(PlusEq(identity)), :+), (:(MinusEq{typeof(identity)}), :(MinusEq(identity)), :-)]
    @eval @inline @generated function (::$OP)(x::T, y::T) where T
        if isprimitivetype(T)
            Expr(:tuple, Expr(:call, $f, :x, :y), :y)
        else
            # apply the accumulator recursively to every field and rebuild
            # both outputs with `Expr(:new, ...)`.
            res = gensym("results")
            computes = Any[:($($F)(x.$field, y.$field)) for field in fieldnames(T)]
            comp = Expr(:(=), res, Expr(:tuple, computes...))
            res1 = Expr(:new, T, [:($res[$i][1]) for i=1:length(computes)]...)
            res2 = Expr(:new, T, [:($res[$i][2]) for i=1:length(computes)]...)
            quote
                $comp
                ($res1, $res2)
            end
        end
    end
    # tuples fall back to the generic method; reals take the direct arithmetic.
    @eval (f::$OP)(x::T, y::T) where T<:Tuple = invoke(f, Tuple{T,T} where T, x, y)
    @eval (f::$OP)(x::T, y::T) where T<:Real = $f(x, y), y
end
# Forward binary instructions when one or both operands are `NullType` wrappers.
for F2 in [:SWAP, :HADAMARD, :((inf::PlusEq)), :((inf::MinusEq)), :((inf::XorEq))]
    @eval @inline function $F2(a::NullType, b::Real)
        @instr $(NiLangCore.get_argname(F2))(a |> value, b)
        a, b
    end
    @eval @inline function $F2(a::NullType, b::NullType)
        @instr $(NiLangCore.get_argname(F2))(a |> value, b |> value)
        a, b
    end
    @eval @inline function $F2(a::Real, b::NullType)
        @instr $(NiLangCore.get_argname(F2))(a, b |> value)
        a, b
    end
end
# Enumerate every type combination where each slot of the tuple type `TT` is
# either its declared parameter or `T2`, excluding the all-`T2` combination.
function type_except(::Type{TT}, ::Type{T2}) where {TT, T2}
    slots = TT.parameters
    n = length(slots)
    choices = [(P, T2) for P in slots]
    combos = Base.Iterators.product(choices...)
    return setdiff(combos, [ntuple(_ -> T2, n)])
end
# Forward three-argument instructions for every combination of `NullType`
# wrappers and plain `Real` operands (except the all-plain case, which is the
# primitive method itself).
for F3 in [:ROT, :IROT, :((inf::PlusEq)), :((inf::MinusEq)), :((inf::XorEq))]
    PS = (:a, :b, :c)
    for PTS in type_except(Tuple{NullType, NullType, NullType}, Real)
        params = map((P,PT)->PT <: NullType ? :($P |> value) : P, PS, PTS)
        params_ts = map((P,PT)->:($P::$PT), PS, PTS)
        @eval @inline function $F3($(params_ts...))
            @instr $F3($(params...))
            ($(PS...),)
        end
    end
end
# patch for fixed point numbers
# Accumulated division on fixed-point targets with integer operands.
function (f::PlusEq{typeof(/)})(out!::T, x::Integer, y::Integer) where T<:Fixed
    out!+T(x)/y, x, y
end
function (f::MinusEq{typeof(/)})(out!::T, x::Integer, y::Integer) where T<:Fixed
    out!-T(x)/y, x, y
end
# Elementary functions on `Fixed43` are routed through Float64.
for F in [:exp, :log, :sin, :sinh, :asin, :cos, :cosh, :acos, :tan, :tanh, :atan]
    @eval Base.$F(x::Fixed43) = Fixed43($F(Float64(x)))
    @eval (f::PlusEq{typeof($F)})(out!::Fixed43, x::Real) = out! + Fixed43($F(x)), x
    @eval (f::MinusEq{typeof($F)})(out!::Fixed43, x::Real) = out! - Fixed43($F(x)), x
end
Base.:^(x::Integer, y::Fixed43) = Fixed43(x^(Float64(y)))
Base.:^(x::Fixed43, y::Fixed43) = Fixed43(x^(Float64(y)))
Base.:^(x::T, y::Fixed43) where T<:AbstractFloat = x^(T(y))
# Reversible accumulation of a type conversion.
function (::PlusEq{typeof(convert)})(out!::T, y) where T<:Real
    out! + convert(T, y), y
end
function (::MinusEq{typeof(convert)})(out!::T, y) where T<:Real
    out! - convert(T, y), y
end
# `AddConst` and `SubConst` undo each other.
Base.:~(ac::AddConst) = SubConst(ac.x)
Base.:~(ac::SubConst) = AddConst(ac.x)
@dualtype AddConst SubConst
# Assigning through one of these unary views applies the inverse instruction.
for F in [:INV, :NEG, :FLIP, :INC, :DEC]
    @eval NiLangCore.chfield(x::T, ::typeof($F), xval::T) where T<:Real = (~$F)(xval)
end
#### The following functions are not safe!
# Single-argument variants push to / pop from the shared `GLOBAL_STACK`.
@i @inline function PUSH!(x::T) where T
    PUSH!((@skip! GLOBAL_STACK), x)
end
@i @inline function POP!(x::T) where T
    POP!((@skip! GLOBAL_STACK), x)
end
@i @inline function COPYPUSH!(x)
    COPYPUSH!((@skip! GLOBAL_STACK), x)
end
@i @inline function COPYPOP!(x)
    COPYPOP!((@skip! GLOBAL_STACK), x)
end
# reversibility turned off, in principle, we can not deallocate `GVar{T}` to `T`
# `PUSH!` moves `x` onto the stack and leaves a fresh zero behind; `POP!` is
# its exact inverse. The COPY variants copy instead of moving.
@i @inline function PUSH!(st, x::T) where T
    @invcheckoff st[end+1] ↔ x
    @invcheckoff x ← _zero(T)
end
@i @inline function POP!(st, x::T) where T
    @invcheckoff x → _zero(T)
    @invcheckoff st[end] ↔ (x::T)::∅
end
@i @inline function COPYPUSH!(st, x)
    @invcheckoff st[end+1] ← x
end
@i @inline function COPYPOP!(st, x)
    @invcheckoff st[end] → x
end
# accumulation on arrays: initially for Bennett algorithm
# TODO: also define it for composite types. or maybe a macro for it.
@i function :(+=)(identity)(target::AbstractArray, source::AbstractArray)
    @safe @assert length(target) == length(source)
    @inbounds for i=1:length(target)
        target[i] += source[i]
    end
end
================================================
FILE: src/macros.jl
================================================
using MLStyle, NiLang
export alloc, @auto_alloc, @auto_expand
"""
    alloc(f, args...)

allocate function output space (the first argument), where `args` only contains the last `N-1` arguments.
"""
function alloc end
"""
    @auto_alloc f(out, args...)
    @auto_alloc out = f(args...)

Automatically allocate the output variable via `alloc` before the call.
"""
macro auto_alloc(ex)
    esc(auto_alloc(ex))
end
# Rewrite `f(out, args...)` or `out = f(args...)` into an ancilla allocation
# for `out` followed by the (reversible) call.
function auto_alloc(ex)
    @match ex begin
        :($f($out, $(args...))) => begin
            Expr(:block, :($out ← $alloc($f, $(args...))), ex)
        end
        :($out = $f($(args...))) => begin
            if length(args) == 0
                error("number of arguments must be >= 1.")
            else
                Expr(:block, :($out ← $alloc($f, $(args...))), :($out += $f($(args...))))
            end
        end
        _ => error("can not allocate automatically for expression: `$ex`")
    end
end
# Default allocation rules: binary arithmetic promotes the operand types,
# elementary functions preserve the type, `abs`/`abs2` yield the real type and
# `sincos` a pair.
for OPM in [:PlusEq, :MinusEq]
    for OP in [:+, :-, :*, :/, :^]
        @eval alloc(::$OPM{typeof($OP)}, x::T1, ::T2) where {T1<:Number,T2<:Number} = zero(promote_type(T1, T2))
    end
    for OP in [:sin, :cos, :tan, :asin, :atan, :acos, :sinh, :cosh, :tanh, :identity, :sqrt, :exp, :log]
        @eval alloc(::$OPM{typeof($OP)}, x::T) where T<:Number = zero(T)
    end
    for OP in [:abs, :abs2]
        @eval alloc(::$OPM{typeof($OP)}, x::T) where T<:Number = zero(real(T))
    end
    @eval alloc(::$OPM{typeof(sincos)}, x::T) where T<:Number = (zero(T), zero(T))
end
# Expand a nested expression into a sequence of reversible compute statements
# followed by the uncomputation of every intermediate result.
function auto_expand(ex)
    res = Expr[]
    auto_expand!(copy(ex), res)
    Expr(:block, res..., NiLangCore.dual_body(@__MODULE__, res[1:end-1])...)
end
# Recursively flatten nested calls: each inner call is computed into a fresh
# gensym ancilla (allocated via `alloc`) which then replaces the call site.
function auto_expand!(ex, exprs, sym=nothing, addnew=true)
    @match ex begin
        :($f($(args...))) => begin
            for (i, arg) in enumerate(args)
                @match arg begin
                    # parameterized constructor calls are expanded in place
                    # without introducing an ancilla.
                    :($_{$(_...)}($(_...))) => begin
                        auto_expand!(arg, exprs, nothing, false)
                    end
                    :($f2($(vs...))) => begin
                        sym2 = gensym()
                        auto_expand!(:(PlusEq($f2)($sym2, $(vs...))), exprs, sym2, true)
                        args[i] = sym2
                    end
                    _ => nothing
                end
            end
            if sym !== nothing
                push!(exprs, :($sym ← $alloc($f, $(args[2:end]...))))
            end
            if addnew
                push!(exprs, :($f($(args...))))
            end
        end
        # op-assign forms are normalized to standard form first.
        :($a += $b) || :($a -= $b) || :($a *= $b) || :($a /= $b) || :($a ⊻= $b) => begin
            auto_expand!(NiLangCore.to_standard_format(ex), exprs, sym, addnew)
        end
        _ => error("Can only expand an expression like `f(args...)`, got $(ex)!")
    end
end
"""
    @auto_expand expr

Expand a nested expression into reversible compute/uncompute steps.
"""
macro auto_expand(ex)
    esc(auto_expand(ex))
end
================================================
FILE: src/stdlib/base.jl
================================================
export i_sqdistance, i_dirtymul, i_factorial
"""
    i_sqdistance(dist!, x1, x2)

Squared distance between two points `x1` and `x2`.
"""
@i function i_sqdistance(dist!, x1::AbstractVector{T}, x2::AbstractVector) where T
    @inbounds for i=1:length(x1)
        # temporarily turn x1[i] into the difference, accumulate its square,
        # then restore x1[i].
        x1[i] -= x2[i]
        dist! += x1[i] ^ 2
        x1[i] += x2[i]
    end
end
"""
    i_dirtymul(out!, x, anc!)

"dirty" reversible multiplication that computes `out! *= x` approximately for floating point numbers,
the `anc!` is anticipated as a number ~0.
"""
@i @inline function i_dirtymul(out!, x, anc!)
    anc! += out! * x
    out! -= anc! / x
    SWAP(out!, anc!)
end
# Integer version: uses truncating division; exact when `anc!` starts at 0.
@i @inline function i_dirtymul(out!::Int, x::Int, anc!::Int)
    anc! += out! * x
    out! -= anc! ÷ x
    SWAP(out!, anc!)
end
"""
    i_factorial(out!, n)

Compute the factorial `out! = factorial(n)`.
"""
@i function i_factorial(out!::Int, n::Int)
    # start from 1, then multiply in 2..n with the dirty multiplier.
    INC(out!)
    @invcheckoff for i=1:n
        i_dirtymul(out!, i, 0)
    end
end
================================================
FILE: src/stdlib/bennett.jl
================================================
export bennett, bennett!
# Run `step` forward `N` times without checkpointing, threading the state and
# the extra positional arguments through every call; return the final state.
function direct_emulate(step, x0::T, args...; N::Int, kwargs...) where T
    state = copy(x0)
    for _ = 1:N
        buffer = _zero(state)
        outputs = step(buffer, state, args...; kwargs...)
        state = outputs[1]
        args = outputs[3:end]
    end
    return state
end
# Logger for Bennett's time–space tradeoff algorithm.
struct BennettLog
    fcalls::Vector{NTuple{3,Any}} # depth, function index f_i := s_{i-1} -> s_{i}, length should be `(2k-1)^n` and function
    peak_mem::Base.RefValue{Int} # should be `n*(k-1)+2`
    depth::Base.RefValue{Int} # current recursion depth
end
BennettLog() = BennettLog(NTuple{3,Any}[], Ref(0), Ref(0))
# hacking the reversible program
# `logfcall` records a forward call and `ilogfcall` a backward call; both pass
# their arguments through unchanged so they can appear in reversible code.
function logfcall(l::BennettLog, i, f)
    push!(l.fcalls, (l.depth[], i, f))
    l, i, f
end
function ilogfcall(l::BennettLog, i, f)
    push!(l.fcalls, (l.depth[], i, ~f))
    l, i, f
end
@dual logfcall ilogfcall
Base.show(io::IO, ::MIME"text/plain", logger::BennettLog) = Base.show(io, logger)
function Base.show(io::IO, logger::BennettLog)
    # backward calls are those whose recorded function is wrapped in `Inv`.
    nreverse = count(x->x[3] isa Inv, logger.fcalls)
    print(io, """Bennett log
| peak memory usage = $(logger.peak_mem[])
| number of function forward/backward calls = $(length(logger.fcalls)-nreverse)/$nreverse""")
end
"""
    bennett(step, y, x, args...; k, N, logger=BennettLog(), kwargs...)

* `step` is a reversible step function,
* `y` is the output state,
* `x` is the input state,
* `k` is the number of steps in each Bennett's recursion,
* `N` is the total number of steps,
* `logger=BennettLog()` is the logging of Bennett's algorithm,
* `args...` and `kwargs...` are additional arguments for steps.
"""
@i function bennett(step, y::T, x::T, args...; k::Int, N::Int, logger=BennettLog(), kwargs...) where T
    state ← Dict{Int, T}()
    state[1] ← _zero(x)
    state[1] += x
    bennett!((@skip! step), state, k, 1, N, args...; do_uncomputing=true, logger=logger, kwargs...)
    # move the result out, then restore and deallocate the working dictionary.
    SWAP(y, state[N+1])
    state[1] -= x
    state[1] → _zero(x)
    state[N+1] → _zero(x)
    state → Dict{Int, T}()
end
"""
    bennett!(step, state::Dict, args...; k, N, logger=BennettLog(), do_uncomputing=false, kwargs...)

* `step` is a reversible step function,
* `state` is the dictionary state, with `state[1]` the input state, the return value is stored in `state[N+1]`,
* `k` is the number of steps in each Bennett's recursion,
* `N` is the total number of steps,
* `logger=BennettLog()` is the logging of Bennett's algorithm,
* `args...` and `kwargs...` are additional arguments for steps.
"""
@i function bennett!(step, state::Dict{Int,T}, args...; k::Int, N::Int, logger=BennettLog(), do_uncomputing=false, kwargs...) where T
    bennett!(step, state, k, 1, N, args...; logger=logger, do_uncomputing=do_uncomputing, kwargs...)
end
# Recursive kernel: a segment of `len` steps is split into chunks of size ~`n`;
# all chunks are computed forward, then all but the last are uncomputed.
@i function bennett!(step, state::Dict{Int,T}, k::Int, base, len, args...; logger, do_uncomputing, kwargs...) where T
    @safe logger !== nothing && (logger.depth[] += 1)
    @invcheckoff if len == 1
        state[base+1] ← _zero(state[base])
        @safe logger !== nothing && (logger.peak_mem[] = max(logger.peak_mem[], length(state)))
        getf(step, base)(state[base+1], state[base], args...; kwargs...)
        if logger !== nothing
            logfcall(logger, (@const base+1), (@const getf(step, base)))
        end
    else
        @routine begin
            @zeros Int nstep n
            n += ceil((@skip! Int), (@const len / k))
            nstep += ceil((@skip! Int), (@const len / n))
        end
        for j=1:nstep
            bennett!(step, state, k, (@const base+n*(j-1)), (@const min(n,len-n*(j-1))), args...; logger=logger, do_uncomputing=true, kwargs...)
        end
        if do_uncomputing
            for j=nstep-1:-1:1
                ~bennett!(step, state, k, (@const base+n*(j-1)), n, args...; logger=logger, do_uncomputing=true, kwargs...)
            end
        end
        ~@routine
    end
end
# `step` may be one function used for all steps, or an array of per-step functions.
getf(f, i::Int) = f
getf(f::AbstractArray, i::Int) = f[i]
================================================
FILE: src/stdlib/blas.jl
================================================
export i_sum, i_mul!, i_dot, i_axpy!, i_umm!, i_norm2
"""
    i_sum(out!, x)

get the sum of `x`.
"""
@i function i_sum(out!, x::AbstractArray)
    @invcheckoff for i=1:length(x)
        @inbounds out! += x[i]
    end
end
# Mapped variant: out! += Σᵢ f(x[i]).
@i function i_sum(out!, f, x::AbstractArray)
    @invcheckoff for i=1:length(x)
        @inbounds out! += f(x[i])
    end
end
"""
    i_mul!(out!, x, y)

compute `x * y` (`x` and `y` are matrices, and store results in `out!`.
"""
@i function i_mul!(out!::AbstractMatrix{T}, x::AbstractMatrix{T}, y::AbstractMatrix{T}) where T
    @safe size(x, 2) == size(y, 1) || throw(DimensionMismatch())
    @invcheckoff @inbounds for k=1:size(y,2)
        for j=1:size(x,2)
            for i=1:size(x,1)
                out![i,k] += x[i,j] * y[j,k]
            end
        end
    end
end
# Matrix–vector product; y[j] is cached in an ancilla for the inner loop.
@i function i_mul!(out!::AbstractVector{T}, x::AbstractMatrix, y::AbstractVector) where T
    @safe size(x, 2) == size(y, 1) || throw(DimensionMismatch())
    @invcheckoff @inbounds for j=1:size(x,2)
        @routine begin
            yj ← zero(T)
            yj += y[j]
        end
        for i=1:size(x,1)
            out![i] += x[i,j] * yj
        end
        ~@routine
    end
end
# out! += x ⋅ y, conjugating the first argument (matches `LinearAlgebra.dot`).
@i function i_dot(out!, x, y)
    @safe @assert length(x) == length(y)
    @invcheckoff @inbounds for i=1:length(x)
        out! += x[i]' * y[i]
    end
end
"""
    i_norm2(out!, x)

get the squared norm of `x`.
"""
@i function i_norm2(out!, x)
    @invcheckoff @inbounds for i=1:length(x)
        out! += abs2(x[i])
    end
end
"""
    i_axpy!(a, x, y!)

compute `y! += a * x`, where `x` and `y` are vectors.
"""
@i function i_axpy!(a, X, Y)
    @safe @assert length(X) == length(Y)
    @invcheckoff @inbounds for i=1:length(Y)
        Y[i] += a * X[i]
    end
end
"""
    i_umm!(x!, θ)

Compute unitary matrix multiplication on `x`, where the unitary matrix is parameterized by (N+1)*N/2 `θ`s.
"""
@i function i_umm!(x!::AbstractArray, θ)
    @routine begin
        M ← size(x!, 1)
        N ← size(x!, 2)
    end
    k ← 0
    @safe @assert length(θ) == M*(M-1)/2
    # apply a sequence of Givens rotations, column by column.
    for l = 1:N
        for j=1:M
            for i=M-1:-1:j
                INC(k)
                ROT(x![i,l], x![i+1,l], θ[k])
            end
        end
    end
    # after the loops `k` equals length(θ); deallocate it against that value.
    k → length(θ)
    ~@routine
end
================================================
FILE: src/stdlib/linalg.jl
================================================
export i_inv!, i_affine!
"""
    i_inv!(out!, A)

Get the inverse of `A`.

```note!!!
this function is implemented as a primitive.
```
"""
@i function i_inv!(out!::AbstractMatrix{T}, A::AbstractMatrix{T}) where T
    @invcheckoff invA ← inv(A)
    out! .+= invA
    @invcheckoff invA → inv(A)
end
# Primitive backward rule for matrix inversion:
# d(A⁻¹) = -A⁻¹ dA A⁻¹, hence Ā -= Aᵀ⁻¹ ōut Aᵀ⁻¹.
@i function i_inv!(out!::AbstractMatrix{T}, A::AbstractMatrix{T}) where T<:GVar
    @routine @invcheckoff begin
        invA ← inv(value.(A))
        gA ← -transpose(invA) * grad(out!) * transpose(invA)
    end
    for i=1:length(out!)
        (out![i] |> value) -= invA[i]
    end
    for i=1:length(A)
        (A[i] |> grad) -= gA[i]
    end
    ~@routine
end
# Primitive backward rule for the determinant: Ā += det(A) * ōut * A⁻ᵀ.
@i function :(-=)(det)(out!::T, A::AbstractMatrix{T}) where T<:GVar
    @routine @invcheckoff begin
        vA ← value.(A)
        detA ← det(vA)
        gA ← detA * grad(out!) * transpose(inv(vA))
    end
    (out! |> value) -= detA
    for i=1:length(A)
        (A[i] |> grad) += gA[i]
    end
    ~@routine
end
# Primitive backward rule for logdet: Ā += ōut * A⁻ᵀ.
@i function :(-=)(logdet)(out!::T, A::AbstractMatrix{T}) where T<:GVar
    @routine @invcheckoff begin
        gA ← grad(out!) * transpose(inv(value.(A)))
    end
    # NOTE(review): this subtracts `det(A |> grad)` from the value; for a
    # logdet rule one would expect `logdet(value.(A))` — confirm intended.
    (out! |> value) -= det(A |> grad)
    for i=1:length(A)
        (A[i] |> grad) += gA[i]
    end
    ~@routine
end
"""
    i_affine!(y!, W, b, x)

`affine!` transformation `y! += W*x + b`.
"""
@i function i_affine!(y!::AbstractVector{T}, W::AbstractMatrix{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T
    @safe @assert size(W) == (length(y!), length(x)) && length(b) == length(y!)
    @invcheckoff for j=1:size(W, 2)
        for i=1:size(W, 1)
            @inbounds y![i] += W[i,j]*x[j]
        end
    end
    @invcheckoff for i=1:size(W, 1)
        @inbounds y![i] += b[i]
    end
end
================================================
FILE: src/stdlib/mapreduce.jl
================================================
# NOTE(review): `i_map!` is exported here but not defined in this file —
# confirm it exists elsewhere in the package.
export i_mapfoldl, i_filter!, i_map!
"""
    i_mapfoldl(map, fold, out!, iter)

Reversible `mapfoldl` function, `map` can be irreversible, but `fold` should be reversible.
"""
@i function i_mapfoldl(map, fold, out!::T, iter) where T
    anc ← zero(T)
    for i=1:length(iter)
        # compute the mapped value into the ancilla, fold it into the output,
        # then uncompute the ancilla.
        anc += map(iter[i])
        fold(out!, anc)
        anc -= map(iter[i])
    end
    anc → zero(T)
end
"""
    i_filter!(f, out!, iter)

Reversible `filter` function, `out!` is an emptied vector.
"""
@i function i_filter!(f, out!::AbstractVector, x::AbstractVector{T}) where T
    @invcheckoff @inbounds for i = 1:length(x)
        if (f(x[i]), ~)
            COPYPUSH!(out!, x[i])
        end
    end
end
================================================
FILE: src/stdlib/nnlib.jl
================================================
export i_softmax_crossentropy, i_relu, i_logsumexp
# `argmax` accumulators: argmax itself is irreversible, but accumulating its
# result into `out!` is reversible.
function (_::PlusEq{typeof(argmax)})(out!, x::AbstractArray)
    out! += argmax(x)
    out!, x
end
function (_::MinusEq{typeof(argmax)})(out!, x::AbstractArray)
    out! -= argmax(x)
    out!, x
end
"""
    i_softmax_crossentropy(x, p, imax, xmax, Z, out)

Softmax-Cross entropy function.
"""
@i function i_softmax_crossentropy(x, p, imax, xmax, Z, out::T) where T
    # subtract maximum
    imax += argmax(x) # trade off space of xmax to time
    xmax += x[imax]
    # accumulate exp(x) to Z, and finally get logZ
    for i=1:length(x)
        x[i] -= xmax
        Z += Base.exp(x[i])
    end
    @routine begin
        yi ← zero(T)
        logZ ← zero(T)
        logZ += log(Z)
    end
    # out += Σᵢ p[i] * (logZ - x[i]); `yi` is computed then uncomputed per item.
    for i=1:length(x)
        yi += logZ
        yi -= x[i]
        out += yi * p[i]
        yi += x[i]
        yi -= logZ
    end
    ~@routine
end
"""
    i_relu(out!, x)

ReLU in machine learning.
"""
@i function i_relu(out!, x)
    @invcheckoff if (x > 0, ~)
        out! += x
    end
end
"""
    i_logsumexp(logout!, out!, xs!, inds!, x)

Compute `logout! = log(sum(exp(x)))`.

# Arguments
* `out!`, output,
* `logout!`, logged output,
* `xs!`, an empty vector to cache the ascending values (same type as `x`),
* `inds!`, an empty vector to cache the ascending indices (integer type),
* `x`, input vector.
"""
@i function i_logsumexp(logout!, out!, xs!, inds!, x::AbstractArray{T}) where T
    # record the running maxima; `xs![end]` is the global maximum used for
    # numerical stabilization.
    i_ascending!(xs!, inds!, x)
    @routine begin
        mx ← zero(T)
        mx += xs![end]
    end
    @invcheckoff @inbounds for i=1:length(x)
        x[i] -= mx
        out! += exp(x[i])
        x[i] += mx
    end
    logout! += log(out!)
    logout! += mx
    ~@routine
end
================================================
FILE: src/stdlib/sorting.jl
================================================
export i_ascending!
"""
    i_ascending!(xs!, inds!, arr)

Find the ascending sequence in `arr` and store the results into `xs!`, indices are stored in `inds!`.
This function can be used to get the maximum value and maximum indices.
"""
@i function i_ascending!(xs!::AbstractVector{T}, inds!, arr::AbstractArray{T}) where T
    @invcheckoff if (length(arr) > 0, ~)
        # the first element always starts the ascending sequence.
        y ← zero(T)
        y += arr[1]
        xs![end+1] ↔ y
        anc ← 1
        inds![end+1] ↔ anc
        @inbounds for i = 2:length(arr)
            # push arr[i] when it exceeds the current maximum; the reverse
            # predicate `i == inds![end]` makes the branch reversible.
            if (arr[i] > xs![end], i==inds![end])
                ind ← i
                x ← zero(T)
                x += arr[i]
                xs![end+1] ↔ x
                inds![end+1] ↔ ind
            end
        end
    end
end
================================================
FILE: src/stdlib/sparse.jl
================================================
using SparseArrays
# C += α * A * B with β fixed to 1: sparse matrix times strided vector/matrix.
@i function i_mul!(C::StridedVecOrMat, A::AbstractSparseMatrix, B::StridedVector{T}, α::Number, β::Number) where T
    @safe size(A, 2) == size(B, 1) || throw(DimensionMismatch())
    @safe size(A, 1) == size(C, 1) || throw(DimensionMismatch())
    @safe size(B, 2) == size(C, 2) || throw(DimensionMismatch())
    @routine begin
        nzv ← nonzeros(A)
        rv ← rowvals(A)
    end
    if (β != 1, ~)
        @safe error("only β = 1 is supported, got β = $(β).")
    end
    # Here, we close the reversibility check inside the loop to increase performance
    @invcheckoff for k = 1:size(C, 2)
        @inbounds for col = 1:size(A, 2)
            @routine begin
                αxj ← zero(T)
                αxj += B[col,k] * α
            end
            for j = SparseArrays.getcolptr(A)[col]:(SparseArrays.getcolptr(A)[col + 1] - 1)
                C[rv[j], k] += nzv[j]*αxj
            end
            ~@routine
        end
    end
    ~@routine
end
# r += dot(A, B) for two CSC matrices: walk both column nonzero lists in
# lockstep; `branch_keeper` records which pointer advanced so the walk can be
# undone at the end of each column.
@i function i_dot(r::T, A::SparseMatrixCSC{T},B::SparseMatrixCSC{T}) where {T}
    @routine @invcheckoff begin
        (m, n) ← size(A)
        branch_keeper ← zeros(Bool, 2*m)
    end
    @safe size(B) == (m,n) || throw(DimensionMismatch("matrices must have the same dimensions"))
    @invcheckoff @inbounds for j = 1:n
        @routine begin
            ia1 ← A.colptr[j]
            ib1 ← B.colptr[j]
            ia2 ← A.colptr[j+1]
            ib2 ← B.colptr[j+1]
            ia ← ia1
            ib ← ib1
        end
        @inbounds for i=1:ia2-ia1+ib2-ib1-1
            ra ← A.rowval[ia]
            rb ← B.rowval[ib]
            # matching row indices contribute to the dot product.
            if (ra == rb, ~)
                r += A.nzval[ia]' * B.nzval[ib]
            end
            ## b move -> true, a move -> false
            branch_keeper[i] ⊻= @const ia == ia2-1 || (ib != ib2-1 && ra > rb)
            ra → A.rowval[ia]
            rb → B.rowval[ib]
            if (branch_keeper[i], ~)
                INC(ib)
            else
                INC(ia)
            end
        end
        # replay the walk in reverse to restore `branch_keeper` to all-false.
        ~@inbounds for i=1:ia2-ia1+ib2-ib1-1
            ## b move -> true, a move -> false
            branch_keeper[i] ⊻= @const ia == ia2-1 || (ib != ib2-1 && A.rowval[ia] > B.rowval[ib])
            if (branch_keeper[i], ~)
                INC(ib)
            else
                INC(ia)
            end
        end
        ~@routine
    end
    ~@routine
end
================================================
FILE: src/stdlib/statistics.jl
================================================
export i_mean_sum, i_var_mean_sum, i_normal_logpdf, i_cor_cov
export VarianceInfo
"""
    i_mean_sum(out!, sum!, x)

get the `mean` and `sum` of `x`.
"""
@i function i_mean_sum(out!, sum!, x)
    for i=1:length(x)
        sum! += x[i]
    end
    out! += sum!/(@const length(x))
end
# Output bundle for `i_var_mean_sum`.
struct VarianceInfo{T}
    variance::T
    variance_accumulated::T
    mean::T
    sum::T
end
function VarianceInfo(::Type{T}) where T
    VarianceInfo(zero(T), zero(T), zero(T), zero(T))
end
"""
    i_var_mean_sum(varinfo, sqv)
    i_var_mean_sum(var!, varsum!, mean!, sum!, v)

Compute the variance, the accumulated variance, mean and sum.
`varinfo` is the `VarianceInfo` object to store outputs.
"""
@i function i_var_mean_sum(varinfo::VarianceInfo{T}, v::AbstractVector{T}) where T
    i_var_mean_sum(varinfo.variance, varinfo.variance_accumulated, varinfo.mean, varinfo.sum, v)
end
@i function i_var_mean_sum(var!, varsum!, mean!, sum!, v::AbstractVector{T}) where T
    i_mean_sum(mean!, sum!, v)
    for i=1:length(v)
        # accumulate (v[i] - mean)^2 through a temporary ancilla.
        @routine @invcheckoff begin
            x ← zero(T)
            x += v[i] - mean!
        end
        varsum! += x ^ 2
        ~@routine
    end
    # sample variance: denominator is length(v) - 1.
    var! += varsum! / (@const length(v)-1)
end
"""
    i_normal_logpdf(out, x, μ, σ)

get the pdf of `Normal(μ, σ)` at point `x`.
"""
@i function i_normal_logpdf(out, x::T, μ, σ) where T
    @routine @invcheckoff begin
        @zeros T anc1 anc2 anc3
        anc1 += x
        anc1 -= μ
        anc2 += anc1 / σ # (x- μ)/σ
        anc3 += anc2^2 # (x-μ)^2/σ^2
    end
    out -= anc3 * 0.5 # -(x-μ)^2/2σ^2
    out -= log(σ) # -(x-μ)^2/2σ^2 - log(σ)
    out -= log(2π)/2 # -(x-μ)^2/2σ^2 - log(σ) - log(2π)/2
    ~@routine
end
"""
    i_cor_cov(rho!,cov!,a,b)

get Pearson correlation and covariance of two vectors `a` and `b`
"""
@i function i_cor_cov(rho!::T, cov!::T, a::AbstractVector{T}, b::AbstractVector{T}) where T
    @safe @assert length(a) == length(b)
    @routine @invcheckoff begin
        @zeros T std1 std2
        info1 ← _zero(VarianceInfo{T})
        i_var_mean_sum(info1, a)
        std1 += sqrt(info1.variance)
        info2 ← _zero(VarianceInfo{T})
        i_var_mean_sum(info2, b)
        std2 += sqrt(info2.variance)
        @zeros T anc5 anc6 anc7
        # anc5 accumulates Σᵢ (a[i]-mean(a)) * (b[i]-mean(b)).
        @inbounds for i=1:length(b)
            @routine begin
                @zeros T anc3 anc4
                anc3 += a[i] - info1.mean
                anc4 += b[i] - info2.mean
            end
            anc5 += anc3 * anc4
            ~@routine
        end
        anc6 += std1 * std2
        anc7 += anc6 * (@const length(b)-1)
    end
    cov! += anc5 / (@const length(b)-1)
    rho! += anc5 / anc7
    ~@routine
end
================================================
FILE: src/stdlib/stdlib.jl
================================================
using .NiLang.AD
using LinearAlgebra
include("base.jl")
include("blas.jl")
include("linalg.jl")
include("statistics.jl")
include("nnlib.jl")
include("sparse.jl")
include("mapreduce.jl")
include("sorting.jl")
include("bennett.jl")
================================================
FILE: src/ulog.jl
================================================
using LogarithmicNumbers
export gaussian_log, gaussian_nlog
export ULogarithmic
# In the log domain, multiplication is addition of the stored logarithms.
@i @inline function (:*=(identity))(x::ULogarithmic, y::ULogarithmic)
    x.log += y.log
end
@i @inline function (:*=(identity))(x::ULogarithmic, y::Real)
    x.log += log(y)
end
# Multiplication/division become addition/subtraction of logs; the real-number
# overloads take logs of the operands first.
for (OP1, OP2, OP3) in [(:*, :+, :(+=)), (:/, :-, :(-=))]
    @eval @i @inline function (:*=($OP1))(out!::ULogarithmic, x::ULogarithmic, y::ULogarithmic)
        out!.log += $OP2(x.log, y.log)
    end
    @eval @i @inline function (:*=($OP1))(out!::ULogarithmic, x::Real, y::Real)
        out!.log += log(x)
        $(Expr(OP3, :(out!.log), :(log(y))))
    end
    @eval @i @inline function (:*=($OP1))(out!::ULogarithmic, x::ULogarithmic, y::Real)
        out!.log += x.log
        $(Expr(OP3, :(out!.log), :(log(y))))
    end
    @eval @i @inline function (:*=($OP1))(out!::ULogarithmic, x::Real, y::ULogarithmic)
        out!.log += log(x)
        $(Expr(OP3, :(out!.log), :(y.log)))
    end
end
# Powers scale the logarithm.
@i @inline function (:*=(^))(out!::ULogarithmic, x::ULogarithmic, y::Real)
    out!.log += x.log * y
end
# log(1 + exp(x)) — used when adding two log-domain numbers.
function gaussian_log(x)
    return log1p(exp(x))
end
# log(1 - exp(x)) — used when subtracting two log-domain numbers.
function gaussian_nlog(x)
    return log1p(-exp(x))
end
# out! *= (x + y) in the log domain, via log(eᵃ+eᵇ) = max + log1p(e^(min-max));
# the equal-logs branch handles x == y exactly as max + log(2).
@i function (:*=)(+)(out!::ULogarithmic{T}, x::ULogarithmic{T}, y::ULogarithmic{T}) where {T}
    @invcheckoff if (x.log == y.log, ~)
        out!.log += x.log
        out!.log += log(2)
    elseif (x.log ≥ y.log, ~)
        out!.log += x.log
        y.log -= x.log
        out!.log += gaussian_log(y.log)
        y.log += x.log
    else
        out!.log += y.log
        x.log -= y.log
        out!.log += gaussian_log(x.log)
        x.log += y.log
    end
end
# out! *= (x - y) in the log domain; requires x ≥ y so the result is nonnegative.
@i function (:*=)(-)(out!::ULogarithmic{T}, x::ULogarithmic{T}, y::ULogarithmic{T}) where {T}
    @safe @assert x.log ≥ y.log
    @invcheckoff if (!iszero(x), ~)
        out!.log += x.log
        y.log -= x.log
        out!.log += gaussian_nlog(y.log)
        y.log += x.log
    end
end
# Conversions into the log domain.
@i function :(*=)(convert)(out!::ULogarithmic{T}, y::ULogarithmic) where T
    out!.log += convert((@skip! T), y.log)
end
@i function :(*=)(convert)(out!::ULogarithmic{T}, y::T) where T<:Real
    out!.log += log(y)
end
# Additive accumulators are undefined for log-domain numbers: fail loudly
# instead of silently producing nonsense.
function (f::PlusEq)(out!::ULogarithmic{T}, args...) where T
    throw(MethodError(f, (out!, args...)))
end
function (f::MinusEq)(out!::ULogarithmic{T}, args...) where T
    throw(MethodError(f, (out!, args...)))
end
Base.convert(::Type{T}, x::ULogarithmic{T}) where {T<:Fixed} = exp(x.log)
# Deallocation check: identical objects pass, otherwise compare the stored logs.
function NiLangCore.deanc(x::T, v::T) where T<:ULogarithmic
    x === v || NiLangCore.deanc(x.log, v.log)
end
================================================
FILE: src/utils.jl
================================================
export rot, plshift, prshift, arshift
"""
    rot(a, b, θ)

Rotate the pair `(a, b)` by an angle `θ` and return the rotated pair.
"""
function rot(a, b, θ)
    sinθ, cosθ = sincos(θ)
    return (a * cosθ - b * sinθ, a * sinθ + b * cosθ)
end
"""
    plshift(x, n)

periodic left shift.
"""
plshift(x, n) = (x << n) | (x >> (sizeof(x)*8-n))
"""
    prshift(x, n)

periodic right shift.
"""
prshift(x, n) = (x >> n) | (x << (sizeof(x)*8-n))
"""
    arshift(x, n)

right shift, sign extending.
"""
function arshift(x::T, n) where T
    nbits = 8 * sizeof(x)
    # BUG FIX: the previous implementation OR-ed back only the single top bit
    # (`x & (T(1) << (nbits-1))`), so for unsigned inputs with n ≥ 2 the
    # vacated bits stayed zero instead of copying the sign bit.
    if iszero(x & (T(1) << (nbits - 1)))
        return x >> n
    else
        # fill the top `n` bits with ones; for signed types `~T(0) >> n` is
        # still all-ones, so the mask is zero and `>>` (already arithmetic)
        # is returned unchanged.
        return (x >> n) | ~(~T(0) >> n)
    end
end
================================================
FILE: src/vars.jl
================================================
# variable manipulation
export @zeros, @ones
"""
Create zeros of specific type.
```julia
julia> @i function f(x)
    @zeros Float64 a b c
    # do something
end
```
"""
macro zeros(T, args...)
    esc(Expr(:block, map(x->:($x ← zero($T)), args)...))
end
# Like `@zeros`, but allocates ancillae initialized to `one(T)`.
macro ones(T, args...)
    esc(Expr(:block, map(x->:($x ← one($T)), args)...))
end
# Let `vec(a)` act as an l-value: reshape the flat data back to `a`'s shape.
function NiLangCore.chfield(a::AbstractArray, ::typeof(vec), val)
    reshape(val, size(a)...)
end
================================================
FILE: src/wrappers.jl
================================================
export IWrapper, Partial, unwrap, value
"""
value(x)
Get the `value` from a wrapper instance.
"""
value(x) = x
NiLangCore.chfield(x::T, ::typeof(value), y::T) where T = y
"""
IWrapper{T} <: Real
IWrapper{T} is a wrapper of for data of type T.
It will forward `>, <, >=, <=, ≈` operations.
"""
abstract type IWrapper{T} <: Real end
# Replacing the contents viewed through a wrapper type `T` re-wraps the new
# value with the inverse constructor `~T`.
NiLangCore.chfield(x, ::Type{T}, v) where {T<:IWrapper} = (~T)(v)
# Machine epsilon of a wrapper type is that of the wrapped numeric type.
Base.eps(::Type{<:IWrapper{T}}) where T = Base.eps(T)
"""
unwrap(x)
Unwrap a wrapper instance (recursively) to get the content value.
"""
unwrap(x::IWrapper) = unwrap(value(x))
unwrap(x) = x
# Forward comparison operators to the unwrapped values.
# The `Real` and `AbstractFloat` methods overlap (AbstractFloat <: Real);
# the extra AbstractFloat methods presumably exist to resolve dispatch
# ambiguities against Base's own comparison methods — TODO(review): confirm.
for op in [:>, :<, :>=, :<=, :isless, :(==), :≈]
@eval Base.$op(a::IWrapper, b::IWrapper) = $op(unwrap(a), unwrap(b))
@eval Base.$op(a::IWrapper, b::Real) = $op(unwrap(a), b)
@eval Base.$op(a::IWrapper, b::AbstractFloat) = $op(unwrap(a), b)
@eval Base.$op(a::Real, b::IWrapper) = $op(a, unwrap(b))
@eval Base.$op(a::AbstractFloat, b::IWrapper) = $op(a, unwrap(b))
end
"""
Partial{FIELD, T, T2} <: IWrapper{T2}
Take a field `FIELD` without dropping information.
This operation can be undone by calling `~Partial{FIELD}`.
"""
struct Partial{FIELD, T, T2} <: IWrapper{T2}
x::T
function Partial{FIELD,T,T2}(x::T) where {T,T2,FIELD}
new{FIELD,T,T2}(x)
end
function Partial{FIELD,T,T2}(x::T) where {T<:Complex,T2,FIELD}
new{FIELD,T,T2}(x)
end
end
# Outer constructors: infer the field type T2 from `getfield(x, FIELD)`.
Partial{FIELD}(x::T) where {T,FIELD} = Partial{FIELD,T,typeof(getfield(x,FIELD))}(x)
# NOTE(review): duplicate specialization for Complex with an identical body;
# presumably for dispatch disambiguation — confirm before removing.
Partial{FIELD}(x::T) where {T<:Complex,FIELD} = Partial{FIELD,T,typeof(getfield(x,FIELD))}(x)
# Inverse of the `Partial{FIELD}` constructor: recover the wrapped object.
@generated function (_::Type{Inv{Partial{FIELD}}})(x::Partial{FIELD}) where {FIELD}
:(x.x)
end
# Writing through the `value` view of a Partial: update field FIELD of the
# wrapped object, then store the updated object back into the wrapper.
function NiLangCore.chfield(hd::Partial{FIELD}, ::typeof(value), val) where FIELD
chfield(hd, Val(:x), chfield(hd.x, Val(FIELD), val))
end
# The value seen through a Partial is the wrapped object's field FIELD.
@generated function value(hv::Partial{FIELD}) where FIELD
:(hv.x.$FIELD)
end
# Zero of a Partial instance delegates to the type-level method below.
function Base.zero(x::T) where T<:Partial
zero(T)
end
# Zero of a Partial type: wrap `zero` of the wrapped type T in a fresh
# Partial viewing the same field.
function Base.zero(x::Type{<:Partial{FIELD,T}}) where {FIELD, T}
Partial{FIELD}(Base.zero(T))
end
# Display a Partial as `<wrapped object>.<field name>`.
Base.show(io::IO, gv::Partial{FIELD}) where FIELD = print(io, "$(gv.x).$FIELD")
# Fix: the MIME type was misspelled "plain/text", which Julia's display
# system never requests, so this method was dead code; the correct MIME
# name is "text/plain".
Base.show(io::IO, ::MIME"text/plain", gv::Partial) = Base.show(io, gv)
================================================
FILE: test/autobcast.jl
================================================
using NiLang
using Test
@testset "auto bcast" begin
a = AutoBcast([1.0, 2.0, 3.0])
@instr NEG(a)
@test a.x == [-1.0,-2.0,-3.0]
a = AutoBcast([1.0, 2.0, 3.0])
@instr INC(a)
@test a.x == [2.0,3.0,4.0]
@instr DEC(a)
@test a.x == [1.0,2.0,3.0]
a = AutoBcast([false, true, true])
@instr FLIP(a)
@test a.x == [true, false, false]
a = AutoBcast([1.0, 2.0, 3.0])
b = AutoBcast([1.0, 2.0, 4.0])
@instr a += b
@test a.x == [2,4,7.0]
@test b.x == [1,2,4.0]
@instr SWAP(a, b)
@test b.x == [2,4,7.0]
@test a.x == [1,2,4.0]
a = AutoBcast([1.0, 2.0, 3.0])
b = 2.0
@instr a += b
@test a.x == [3,4,5.0]
@test b == 2.0
a = AutoBcast([1.0, 2.0, 3.0])
b = AutoBcast([1.0, 2.0, 4.0])
c = AutoBcast([1.0, 2.0, 1.0])
@instr a += b * c
@test a.x == [2,6,7.0]
@test b.x == [1,2,4.0]
@test c.x == [1,2,1.0]
a = AutoBcast([1.0, 2.0, 3.0])
b = 2.0
c = AutoBcast([1.0, 2.0, 1.0])
@instr a += b * c
@test a.x == [3,6,5.0]
@test b == 2.0
@test c.x == [1,2,1.0]
a = AutoBcast([1.0, 2.0, 3.0])
b = AutoBcast([1.0, 2.0, 4.0])
c = 3.0
@instr a += b * c
@test a.x == [4,8,15.0]
@test b.x == [1,2,4.0]
@test c == 3.0
a = AutoBcast([1.0, 2.0, 3.0])
b = 2.0
c = 3.0
@instr a += b * c
@test a.x == [7,8,9.0]
@test b == 2.0
@test c == 3.0
@test zero(AutoBcast{Int,3}) == AutoBcast([0, 0, 0])
end
================================================
FILE: test/autodiff/autodiff.jl
================================================
using Test, NiLang, NiLang.AD
include("vars.jl")
include("stack.jl")
include("gradfunc.jl")
include("instructs.jl")
include("ulog.jl")
include("complex.jl")
include("manual.jl")
include("jacobian.jl")
include("hessian_backback.jl")
================================================
FILE: test/autodiff/complex.jl
================================================
using Test, NiLang, NiLang.AD
@testset "complex GVar" begin
a = 1.0+ 2im
@test GVar(a) == Complex(GVar(1.0), GVar(2.0))
@test GVar(a, a) == Complex(GVar(1.0, 1.0), GVar(2.0, 2.0))
gx = GVar(1.0 + 1.0im)
gx2 = chfield(gx, grad, 1.0+0.0im)
@test gx2 == Complex(GVar(1.0, 1.0), GVar(1.0, 0.0))
end
# Run `f` on `args`, then accumulate the real part of the `il`-th argument
# into the scalar `loss` (reversible wrapper used by ccheck_grad).
@i function fr(f, loss, args...; il)
f(args...)
loss += (args |> tget(il)).re
end
# Same as `fr` but projects onto the imaginary part of the `il`-th argument.
@i function fi(f, loss, args...; il)
f(args...)
loss += (args |> tget(il)).im
end
# Gradient check for a complex-valued reversible function: check the real
# and imaginary projections (`fr`, `fi`) separately.
# NOTE(review): the `iloss` keyword is accepted but unused — the wrappers
# hardcode iloss=2 (the loss slot in (f, 0.0, args...)) and il=1; confirm
# whether any caller relies on passing a different iloss.
function ccheck_grad(f, args; verbose=true, iloss=1)
check_grad(fr, (f, 0.0, args...); verbose=verbose, iloss=2, il=1) &&
check_grad(fi, (f, 0.0, args...); verbose=verbose, iloss=2, il=1)
end
@testset "check grad" begin
x = 1.0 - 4.0im
y = 2.0 - 2.3im
z = 3.0 + 1.0im
r = 4.0
for opm in [PlusEq, MinusEq]
@test check_inv(opm(complex), (1+2.0im, 2.0, 3.0); verbose=true)
@test ccheck_grad(opm(complex), (1+2.0im, 2.0, 3.0); verbose=true, iloss=1)
for (subop, args) in [
(opm(identity), (x,y)), (opm(+), (x, y, z)),
(opm(-), (x, y, z)), (opm(*), (x, y, z)),
(opm(/), (x, y, z)), (opm(^), (x, y, r)),
(opm(exp), (x, y)), (opm(log), (x, y)),
(opm(inv), (x, y))
]
@test ccheck_grad(subop, args; verbose=true, iloss=1)
r1 = subop(args...)
r2 = [(opm == (PlusEq) ? Base.:+ : Base.:-)(args[1], subop.f(args[2:end]...)), args[2:end]...]
@test all(r1 .≈ r2)
end
for (subop, args) in [
(opm(angle), (r, y)), (opm(abs), (r, y)), (opm(abs), (r, 0.0im)),
(opm(abs2), (r, y))
]
@show subop, args
r1 = [subop(args...)...]
r2 = [(opm == (PlusEq) ? Base.:+ : Base.:-)(args[1], subop.f(args[2:end]...)), args[2:end]...]
@test r1 ≈ r2
@test check_grad(subop, args; verbose=true, iloss=1)
end
end
for op in [NEG]
@test check_inv(op, (x,); verbose=true)
@test ccheck_grad(op, (x,); verbose=true, iloss=1)
end
end
================================================
FILE: test/autodiff/gradfunc.jl
================================================
using Test, NiLang, NiLang.AD
# `add(x, y)` is the reversible `x += identity(y)`, used throughout below.
const add = PlusEq(identity)
# NGrad{N} wraps a function to mark N-fold gradient application.
@testset "NGrad" begin
@test NGrad{3}(exp) isa NGrad{3,typeof(exp)}
end
# Grad(f) runs f and back-propagates from the argument selected by iloss.
@testset "instr" begin
x, y = 3.0, 4.0
@instr Grad(add)(x, y; iloss=1)
@test grad(x) == 1.0
@test grad(y) == 1.0
@test check_inv(Grad(add), (3.0, 4.0); verbose=true, atol=1e-5, iloss=1)
x, y = 3.0, 4.0
@test check_grad(add, (x, y); iloss=1)
x, y = 3.0, 4.0
# NoGrad-wrapped arguments are excluded from gradient accumulation
Grad(add)(x, NoGrad(y); iloss=1)
@test grad(y) === 0.0
@test check_inv(PlusEq(*), (0.4, 0.4, 0.5))
@test MinusEq(*)(GVar(0.0, 1.0), GVar(0.4), GVar(0.6)) == (GVar(-0.24, 1.0), GVar(0.4, 0.6), GVar(0.6, 0.4))
@test check_grad(PlusEq(*), (0.4, 0.4, 0.5); iloss=1)
@test check_grad(MinusEq(*), (0.4, 0.4, 0.5); iloss=1)
end
# Gradients through user-defined @i functions and compute/uncompute pairs.
@testset "i" begin
@i function test1(a, b, out)
a += b
out += a * b
end
@i function tt(a, b)
out ← 0.0
test1(a, b, out)
(~test1)(a, b, out)
a += b
out → 0.0
end
# compute (a+b)*b -> out
x = 3.0
y = 4.0
out = 0.0
@test check_grad(test1, (x, y, out); iloss=3)
@test check_grad(tt, (x, y); iloss=1)
end
# Gradients through broadcasted (dotted) reversible statements.
@testset "broadcast" begin
# compute (a+b)*b -> out
@i function test1(a, b)
a .+= b
end
@i function test2(a, b, out, loss)
a .+= b
out .+= (a .* b)
loss += out[1]
end
x = [3, 1.0]
y = [4, 2.0]
out = [0.0, 1.0]
loss = 0.0
# gradients
@test check_grad(test2, (x, y, out, loss); iloss=4)
end
# Broadcasting a scalar @i function over GVar arrays with `(~f).(...)`.
@testset "broadcast 2" begin
# compute (a+b)*b -> out
@i function test1(a, b)
a += b
end
@i function test2(a, b, out)
a += b
out += (a * b)
end
# gradients
a = 1.0
b = 1.3
c = 1.9
@test check_grad(test2, (a,b,c); iloss=3)
x = GVar([3, 1.0])
y = GVar([4, 2.0])
lout = GVar.([0.0, 1.0], [0.0, 2.0])
@instr (~test2).(x, y, lout)
@test grad.(lout) == [0,2.0]
@test grad.(x) == [0, 4.0]
@test grad.(y) == [0, 6.0]
end
# Nested reversible calls (a function calling another @i function).
@testset "function call function" begin
# compute (a+b)*b -> out
@i function test1(a, b)
a += b
end
@i function test2(a, b, out)
test1(a, out)
(~test1)(a, out)
out += (a * b)
end
a = 1.0
b = 1.3
c = 1.9
@test check_grad(test2, (a,b,c); iloss=3)
end
# Unary minus inside a reversible expression.
@testset "neg sign" begin
@i function test(out, x, y)
out += x * (-y)
end
@test check_grad(test, (0.1, 2.0, -2.5); verbose=true, iloss=1)
end
# Reversibility properties of Grad-wrapped functions.
@testset "i" begin
@i function test1(a::T, b, out) where T<:Number
add(a, b)
out += a * b
end
@test isreversible(Grad(test1), Tuple{Number, Any,Any})
@test isreversible(~Grad(test1), Tuple{Number, Any,Any})
@test Grad(~test1) != ~(Grad(test1)) # this is not true
end
# gradient() returns the tuple of gradients for all arguments.
@testset "gradient" begin
@test gradient((PlusEq(*)), (0.0, 2.0, 3.0); iloss=1) == (1.0, 3.0, 2.0)
end
================================================
FILE: test/autodiff/hessian_backback.jl
================================================
using NiLang, NiLang.AD, Test
using NiLang.AD: hessian_numeric
@testset "hessian" begin
h1 = hessian_backback(PlusEq(*), (0.0, 2.0, 3.0); iloss=1)
h2 = hessian_numeric(PlusEq(*), (0.0, 2.0, 3.0); iloss=1)
@test h1 ≈ h2
@i function test(a,b,c,d)
a += b*c
a += b^d
c += b/d
ROT(a, c, d)
b += d ^ 2
a += c * d
end
h1 = hessian_backback(test, (0.0, 2.0, 1.0, 3.0); iloss=1)
h2 = hessian_numeric(test, (0.0, 2.0, 1.0, 3.0); iloss=1)
@show h2
@test isapprox(h1, h2, atol=1e-8)
end
================================================
FILE: test/autodiff/instructs.jl
================================================
using NiLang, NiLang.AD
using Test
@testset "check grad" begin
for opm in [PlusEq, MinusEq]
@test check_grad(opm(identity), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(*), (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(+), (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(-), (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(^), (1.0, 2.0, 2); verbose=true, iloss=1)
@test check_grad(opm(^), (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(inv), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(sqrt), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(abs), (1.0, -2.0); verbose=true, iloss=1)
@test check_grad(opm(abs2), (1.0, -2.0); verbose=true, iloss=1)
@test check_grad(opm(exp), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(log), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(sin), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(sinh), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(asin), (1.0, 0.2); verbose=true, iloss=1)
@test check_grad(opm(cos), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(cosh), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(acos), (1.0, 0.2); verbose=true, iloss=1)
@test check_grad(opm(tan), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(tanh), (1.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(atan), (1.0, -2.0); verbose=true, iloss=1)
@test check_grad(opm(atan), (1.0, -2.0, 1.5); verbose=true, iloss=1)
@test check_grad(opm(convert), (Fixed43(0.5), 2.0); verbose=true, iloss=1)
@test check_grad(opm(/), (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(min), (1.0, 2.0, 3.0); verbose=true, iloss=1)
@test check_grad(opm(max), (1.0, 2.0, 3.0); verbose=true, iloss=1)
@test check_grad(opm(min), (1.0, 3.0, 2.0); verbose=true, iloss=1)
@test check_grad(opm(max), (1.0, 3.0, 2.0); verbose=true, iloss=1)
@test_broken check_grad(opm(÷), (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test gradient(opm(sqrt), (1.0, 0.0); iloss=1)[2] == 0
end
@test check_grad(NEG, (1.0,); verbose=true, iloss=1)
@test check_grad(INV, (3.0,); verbose=true, iloss=1)
@test check_grad(AddConst(2.0), (3.0,); verbose=true, iloss=1)
@test check_grad(SubConst(2.0), (3.0,); verbose=true, iloss=1)
@test check_grad(INC, (1.0,); verbose=true, iloss=1)
@test check_grad(DEC, (1.0,); verbose=true, iloss=1)
@test check_grad(ROT, (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(ROT, (1.0, 2.0, 2.0); verbose=true, iloss=2)
@test check_grad(IROT, (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(IROT, (1.0, 2.0, 2.0); verbose=true, iloss=2)
@test check_grad(HADAMARD, (3.0, 2.0); verbose=true, iloss=1)
@test check_grad(HADAMARD, (3.0, 2.0); verbose=true, iloss=2)
end
@testset "partial gvar" begin
@i function testf1(f, a, b)
f(a, b, 2.0)
end
@i function testf2(f, a, b)
f(a, 2.0, b)
end
for testf in [testf1, testf2]
for opm in [PlusEq, MinusEq]
@test check_grad(testf, (opm(*), 1.0, 2.0); verbose=true, iloss=2)
@test check_grad(testf, (opm(+), 1.0, 2.0); verbose=true, iloss=2)
@test check_grad(testf, (opm(-), 1.0, 2.0); verbose=true, iloss=2)
@test check_grad(testf, (opm(^), 1.0, 2.0); verbose=true, iloss=2)
@test check_grad(testf, (opm(atan), 1.0, -2.0); verbose=true, iloss=2)
@test check_grad(testf, (opm(/), 1.0, 2.0); verbose=true, iloss=2)
end
end
@test check_grad(testf1, (ROT, 1.0, 2.0); verbose=true, iloss=2)
@test check_grad(testf1, (ROT, 1.0, 2.0); verbose=true, iloss=3)
@test check_grad(testf1, (IROT, 1.0, 2.0); verbose=true, iloss=2)
@test check_grad(testf1, (IROT, 1.0, 2.0); verbose=true, iloss=3)
# ROT and HADAMARD does not allow different types of rotation elements
end
@testset "sincos" begin
@i function f(s, c, x)
(s, c) += sincos(x)
end
@test check_grad(f, (1.0, 2.0, 2.0); verbose=true, iloss=1)
@test check_grad(f, (1.0, 2.0, 2.0); verbose=true, iloss=2)
end
@testset "AD over pop" begin
@i function mean(out!::T, x) where T
anc ← zero(out!)
for i=1:length(x)
anc += x[i]
end
out! += anc / (@const length(x))
FLOAT64_STACK[end+1] ↔ anc::T
end
@test check_grad(mean, (0.0, [1,2,3.0, 4.0]); iloss=1)
end
@testset "AD over pipe" begin
@i function mean(out!, anc, x)
for i=1:length(x)
PlusEq(identity)(anc, x[i])
SWAP(anc, x[i])
end
out! += anc / (@const length(x))
end
@test check_grad(mean, (0.0, 0.0, [1,2,3.0, 4.0]); iloss=1, verbose=true)
end
@testset "push, load data" begin
stack = []
val = [1,2,3]
@instr PUSH!(stack, val)
@test val == Int[]
val = 3.0
@instr PUSH!(stack, val)
@test val == 0.0
val = 3.0
@instr PUSH!(stack, val)
x = GVar(3.0)
#@test_throws InvertibilityError @instr POP!(stack, x)
z = 3.0
@instr PUSH!(stack, z)
z = GVar(0.0)
@instr POP!(stack, z)
@test z == GVar(3.0)
x = [1.0, 2.0, 3.0]
@instr PUSH!(stack, x)
y = empty(x)
@instr POP!(stack, y)
@test y == GVar.([1,2,3.0])
x = [1.0, 2.0, 3.0]
@instr PUSH!(stack, x)
y = empty(x)
@instr POP!(stack, y)
@test y == [1,2,3.0]
end
@testset "dataviews" begin
@i function f(z, y, x)
y += cos(x |> INV)
z += tan(y |> AddConst(4.0))
z += y * (x |> NEG |> SubConst(0.5) |> INV)
z += sin(x |> INV)
end
@test check_grad(f, (0.2, 0.5, 0.8); iloss=1)
end
@testset "additive identity" begin
struct TestAdd2{T}
x::T
y::Vector{T}
end
x = TestAdd2(GVar(1.0, 2.0), [GVar(2.0, 1.2)])
y = TestAdd2(GVar(6.0, 3.0), [GVar(4.0, 4.1)])
@test getfield.(MinusEq(identity)(x, y), :x) == getfield.((TestAdd2(GVar(-5.0, 2.0), [GVar(-2.0, 1.2)]), TestAdd2(GVar(6.0, 5.0), [GVar(4.0, 5.3)])), :x)
x = TestAdd2(GVar(1.0, 2.0), [GVar(2.0, 1.2)])
y = TestAdd2(GVar(6.0, 3.0), [GVar(4.0, 4.1)])
@test getfield.(MinusEq(identity)(x, y), :y) == getfield.((TestAdd2(GVar(-5.0, 2.0), [GVar(-2.0, 1.2)]), TestAdd2(GVar(6.0, 5.0), [GVar(4.0, 5.3)])), :y)
end
================================================
FILE: test/autodiff/jacobian.jl
================================================
using NiLang, NiLang.AD
using Test
using NiLang.AD: wrap_bcastgrad
# Reversible adapter: spread an array of 1-3 parameters as positional
# arguments of `f`, so array-based jacobian drivers can call scalar ops.
@i function asarrayfunc(params; f, kwargs...)
if (length(params) == 1, ~)
f(params[1]; kwargs...)
elseif (length(params) == 2, ~)
f(params[1], params[2]; kwargs...)
elseif (length(params) == 3, ~)
f(params[1], params[2], params[3]; kwargs...)
end
end
# wrap_bcastgrad wraps differentiable leaves as GVar with AutoBcast grads,
# and passes integers, NoGrad values and functions through unchanged.
@testset "bcastgrad" begin
T = AutoBcast{Int, 4}
@test wrap_bcastgrad(T, ones(10)) == [GVar(1.0, AutoBcast(ones(4))) for i=1:10]
@test wrap_bcastgrad(T, 3) == 3
@test wrap_bcastgrad(T, NoGrad(3.0)) == 3.0
@test wrap_bcastgrad(T, 3.0) == GVar(3.0, AutoBcast(ones(4)))
@test wrap_bcastgrad(T, (3.0,)) == (GVar(3.0, AutoBcast(ones(4))),)
@test wrap_bcastgrad(T, exp) == exp
@test wrap_bcastgrad(T, Inv(exp)) == Inv(exp)
end
# The vectorized jacobian must agree with the column-by-column reference.
@testset "jacobians" begin
for op in [PlusEq(*), PlusEq(/), PlusEq(^), ROT]
j1 = NiLang.AD.jacobian(asarrayfunc, [0.3, 0.4, 2.0]; iin=1, f=op)
j2 = NiLang.AD.jacobian_repeat(asarrayfunc, [0.3, 0.4, 2.0]; iin=1, f=op)
@test j1 ≈ j2
end
for op in [PlusEq(identity), PlusEq(abs), SWAP, PlusEq(exp), PlusEq(log), PlusEq(sin), PlusEq(cos)]
j1 = NiLang.AD.jacobian(asarrayfunc, [0.3, 0.4]; iin=1, f=op)
j2 = NiLang.AD.jacobian_repeat(asarrayfunc, [0.3, 0.4]; iin=1, f=op)
@test j1 ≈ j2
end
for op in [-, NEG]
j1 = NiLang.AD.jacobian(asarrayfunc, [0.3]; iin=1, f=op)
j2 = NiLang.AD.jacobian_repeat(asarrayfunc, [0.3]; iin=1, f=op)
@test j1 ≈ j2
end
end
# NoGrad values flow through instructions but collect no gradient.
@testset "nograd" begin
@test AddConst(3.0)(NoGrad(2.0)) == NoGrad(5.0)
@test SWAP(NoGrad(2.0), NoGrad(3.0)) == (NoGrad(3.0), NoGrad(2.0))
@test PlusEq(*)(NoGrad(2.0), NoGrad(3.0), NoGrad(4.0)) == (NoGrad(14.0), NoGrad(3.0), NoGrad(4.0))
end
================================================
FILE: test/autodiff/manual.jl
================================================
using NiLang, Test
using NiLang.AD
# User-defined primitives: declaring `NiLang.AD.primitive_grad` for a plain
# function makes `PlusEq(f)` differentiable without a reversible body.
test_func(x) = exp(x)
NiLang.AD.primitive_grad(::typeof(test_func), x) = exp(x)
test_g(x, y; k=0) = x^k * y
# gradient of test_g with respect to (x, y); the keyword `k` is forwarded
function NiLang.AD.primitive_grad(::typeof(test_g), x, y; k=0)
return k*x^(k-1)*y, x^k
end
@testset "primitive grad" begin
@test check_grad(PlusEq(test_func), (1.0, 1.0), iloss=1)
@test check_grad(PlusEq(test_g), (1.0, 3.0, 2.0), k=2, iloss=1)
end
================================================
FILE: test/autodiff/stack.jl
================================================
using NiLang, Test, NiLang.AD
@testset "loaddata" begin
@test NiLang.loaddata(GVar(0.1), 0.3) == GVar(0.3)
@test NiLang.loaddata(Complex(GVar(0.1, 0.2), GVar(0.2)), 0.3+0.6im) == Complex(GVar(0.3), GVar(0.6))
@test NiLang.loaddata(typeof(Complex(GVar(0.1, 0.2), GVar(0.2))), 0.3+0.6im) == Complex(GVar(0.3), GVar(0.6))
@test NiLang.loaddata(GVar(0.2, AutoBcast{Float64,3}(zeros(3))), 0.3) == GVar(0.3, AutoBcast{Float64,3}(zeros(3)))
@test NiLang.loaddata((GVar(0.2, AutoBcast{Float64,3}(zeros(3))), 7), (0.3, 4)) == (GVar(0.3, AutoBcast{Float64,3}(zeros(3))), 4)
@test NiLang.loaddata(typeof((GVar(0.2, AutoBcast{Float64,3}(zeros(3))), 7)), (0.3, 4)) == (GVar(0.3, AutoBcast{Float64,3}(zeros(3))), 4)
@test NiLang.loaddata(4, 2.0) == 2
end
@testset "push load" begin
x = (0.3, 3.0, [1,2,3.0])
@instr PUSH!(x)
t = (0.0, 0.0, Float64[])
@test x == t && typeof(x) == typeof(t)
y = (0.0, GVar(0.0), GVar{Float64,Float64}[])
@instr POP!(y)
t = (0.3, GVar(3.0), GVar([1,2, 3.0]))
@test y == t && typeof(y) == typeof(t)
x = [0.3, 3.0, [1,2,3.0]]
@instr PUSH!(x)
t = []
@test x == t && typeof(x) == typeof(t)
y = []
@instr POP!(y)
t = [0.3, GVar(3.0), GVar([1,2, 3.0])]
@test y == t && typeof(y) == typeof(t)
x = (0.3, 3.0, [1,2,3.0])
@instr @invcheckoff PUSH!(x)
t = (0.0, 0.0, Float64[])
@test x == t && typeof(x) == typeof(t)
y = (0.0, GVar(0.0), GVar(zeros(0)))
@instr @invcheckoff POP!(y)
t = (0.3, GVar(3.0), GVar([1,2, 3.0]))
@test y == t && typeof(y) == typeof(t)
x = (0.3, 3.0, [1,2,3.0])
@instr @invcheckoff COPYPUSH!(x)
t = (0.3, 3.0, [1,2,3.0])
@test x == t && typeof(x) == typeof(t)
y = (0.3, GVar(t[2]), GVar(t[3]))
@instr @invcheckoff COPYPOP!(y)
t = (0.3, GVar(3.0), GVar([1,2, 3.0]))
@test y == t && typeof(y) == typeof(t)
x = (0.3, 3.0, [1,2,3.0])
@instr COPYPUSH!(x)
t = (0.3, 3.0, [1,2,3.0])
@test x == t && typeof(x) == typeof(t)
y = (0.3, GVar(t[2]), GVar(t[3]))
@instr COPYPOP!(y)
t = (0.3, GVar(3.0), GVar([1,2, 3.0]))
@test y == t && typeof(y) == typeof(t)
x = [0.3, 3.0, [1,2,3.0]]
@instr COPYPUSH!(x)
t = [0.3, 3.0, [1,2,3.0]]
@test x == t && typeof(x) == typeof(t)
y = [0.3, GVar(t[2]), GVar(t[3])]
@instr COPYPOP!(y)
t = [0.3, GVar(3.0), GVar([1,2, 3.0])]
@test y == t && typeof(y) == typeof(t)
end
================================================
FILE: test/autodiff/ulog.jl
================================================
using NiLang, NiLang.AD
using Test, Random
using FixedPointNumbers
using NiLangCore: default_constructor
using FiniteDifferences
@testset "ULogarithmic" begin
@test check_grad(PlusEq(gaussian_log), (1.0, 2.0); iloss=1)
function muleq(f, x::T, y::T, z::T) where T
x = default_constructor(ULogarithmic{T}, x)
y = default_constructor(ULogarithmic{T}, y)
z = default_constructor(ULogarithmic{T}, z)
x *= f(y, z)
x.log
end
g1, = FiniteDifferences.grad(central_fdm(5,1), arr->muleq(+, arr...), [7.0, 5.0, 3.0])
x, y, z = default_constructor(ULogarithmic{Float64}, 7.0),
default_constructor(ULogarithmic{Float64}, 5.0),
default_constructor(ULogarithmic{Float64}, 3.0)
@instr (MulEq(+))(x, y, z)
@instr GVar(x)
@instr GVar(y)
@instr GVar(z)
@instr x.log.g += 1
@instr (~MulEq(+))(x, y, z)
@test grad(x.log) ≈ g1[1]
@test grad(y.log) ≈ g1[2]
@test grad(z.log) ≈ g1[3]
g2, = FiniteDifferences.grad(central_fdm(5,1), arr->muleq(-, arr...), [7.0, 5.0, 3.0])
x, y, z = default_constructor(ULogarithmic{Float64}, 2.0),
default_constructor(ULogarithmic{Float64}, 5.0),
default_constructor(ULogarithmic{Float64}, 3.0)
@instr (MulEq(-))(x, y, z)
@instr GVar(x)
@instr GVar(y)
@instr GVar(z)
@instr x.log.g += 1
@instr (~MulEq(-))(x, y, z)
@test grad(x.log) ≈ g2[1]
@test grad(y.log) ≈ g2[2]
@test grad(z.log) ≈ g2[3]
end
@testset "iexp" begin
@i function i_exp(y!::T, x::T) where T<:Union{Fixed, GVar{<:Fixed}}
@invcheckoff begin
@routine begin
s ← one(ULogarithmic{T})
lx ← one(ULogarithmic{T})
k ← 0
end
lx *= convert(x)
y! += convert(s)
@from k==0 while s.log > -20
k += 1
s *= lx / k
y! += convert(s)
end
~(@from k==0 while s.log > -20
k += 1
s *= x / k
end)
lx /= convert(x)
~@routine
end
end
x = Fixed43(3.5)
res = i_exp(Fixed43(0.0), x)[1]
gx = grad(Grad(i_exp)(Val(1), Fixed43(0.0), x)[3])
@test res ≈ exp(3.5)
@test gx ≈ exp(3.5)
end
================================================
FILE: test/autodiff/vars.jl
================================================
using NiLang, NiLang.AD
using Test
@testset "gvar" begin
g1 = GVar(0.0)
@test (~GVar)(g1) === 0.0
@assign (g1 |> grad) 0.5
@test g1 === GVar(0.0, 0.5)
@test_throws InvertibilityError (~GVar)(g1)
@test !almost_same(GVar(0.0), GVar(0.0, 1.0))
@test zero(GVar(3.0, 2.0)) == GVar(0.0)
@test one(GVar(3.0, 2.0)) == GVar(1.0)
@test iszero(GVar(0.0, 2.0))
@test zero(GVar(2, AutoBcast([1, 0, 0]))) == GVar(0, AutoBcast([0, 0, 0]))
@test GVar(true) == true
@test grad("x") == ""
@test grad((1.0, GVar(1.0, 2.0))) == (0.0,2.0)
@test grad(grad) == 0
@test grad((1.0, 2.0)) == (0.0,0.0)
@test grad([1.0, 2.0]) == [0.0,0.0]
@test grad([GVar(1.0, 3.0), GVar(2.0, 1.0)]) == [3.0,1.0]
@test grad(Complex(GVar(1.0, 3.0), GVar(2.0, 1.0))) == Complex(3.0,1.0)
@test grad(Complex(1.0, 2.0)) == Complex(0.0,0.0)
end
@testset "assign" begin
arg = (1,2,GVar(3.0))
@assign (arg.:3).g 4.0
@test arg[3].g == 4.0
gv = GVar(1.0, GVar(0.0))
@test gv.g.g === 0.0
@assign gv.g.g 7.0
@test gv.g.g === 7.0
gv = GVar(1.0, GVar(0.0))
@assign gv |> grad |> grad 0.0
@test gv.g.g === 0.0
args = (GVar(0.0, 1.0),)
@assign (args.:1 |> grad) 0.0
@test args[1].g == 0.0
arr = [1.0]
arr0 = arr
@assign arr[] 0.0
@test arr[] == 0.0
@test arr === arr0
end
@testset "assign tuple" begin
x = 0.3
@instr for i=1:length(x) GVar(x) end
@test x === GVar(0.3)
end
@testset "assign bcast func" begin
# vector bcast
x = [GVar(0.1, 0.1), GVar(0.2, 0.2)]
res = [1.0, 2.0]
@assign (x .|> value) res
@test x == [GVar(1.0, 0.1), GVar(2.0, 0.2)]
# tuple bcast
x = (GVar(0.1, 0.1), GVar(0.2, 0.2))
res = (1.0, 2.0)
@assign (x .|> value) res
@test x == (GVar(1.0, 0.1), GVar(2.0, 0.2))
end
@testset "GVar over general type" begin
struct ABC{T1, T2}
a::T1
b::T1
c::T2
end
x = ABC(1, 2, 3.0)
@test GVar(x) == ABC(1, 2, GVar(3.0))
@test GVar(x, x) == ABC(GVar(1, 1), GVar(2, 2), GVar(3.0, 3.0))
@test (~GVar)(ABC(1, 2, GVar(3.0))) == x
@test grad(ABC(1, 2, GVar(3.0, 2.0))) == ABC(0, 0, 2.0)
@test GVar(1.0 + 2.0im , 2.0im + 4.0im) == Complex(GVar(1.0, 2.0), GVar(2.0, 4.0))
@test GVar((1.0, 2.0im) , (2.0im, 4.0im)) == (GVar(1.0, 2.0), Complex(GVar(0.0), GVar(2.0, 4.0)))
# without type parameter
struct EFG
x
end
@test GVar(EFG) == EFG
@test grad(EFG(GVar(2.0, 3.0))) == EFG(3.0)
end
@testset "dict" begin
@i function f()
d ← Dict(1=>GVar(1.0, 2.0))
d → Dict(1=>GVar(1.0))
end
@test f() == ()
end
@testset "NoGrad" begin
a = NoGrad(0.5)
@test a isa NoGrad
@test zero(a) == NoGrad(0.0)
@test (~NoGrad)(a) === 0.5
@test -NoGrad(0.5) == NoGrad(-0.5)
a2 = NoGrad{Float64}(a)
@test a2 === a
println(a2)
@test chfield(a2, NoGrad, NoGrad(0.4)) === 0.4
@test unwrap(NoGrad(a)) == 0.5
@test NoGrad(a) < 0.6
@test NoGrad(a) <= 0.6
@test NoGrad(a) >= 0.4
@test a ≈ 0.5
@test a == 0.5
@test a > 0.4
@test isless(a, 0.6)
end
================================================
FILE: test/complex.jl
================================================
using Test, NiLang
@testset "complex" begin
a = 1.0+ 2im
@instr (a |> real) += 2
@instr (a |> imag) += 2
@test a == 3.0 + 4im
a = 1.0+ 2im
@instr a += complex(2.0, 2.0)
@test a == 3.0 + 4.0im
@i function f(loss, a::Complex{T}, b) where T
@routine begin
c ← zero(a)
sq ← zero(T)
c += a * b
sq += (c |> real) ^ 2
sq += (c |> imag) ^ 2
end
loss += sq ^ 0.5
~@routine
end
a = 1.0 + 2.0im
b = 2.0 + 1.0im
loss = 0.0
@instr f(loss, a, b)
@test loss ≈ abs(a*b)
end
@testset "complex arithmetics" begin
for op in [exp, log, identity]
x, y = 2.0+1.0im, 0.5+0.2im
@instr x += op(y)
@test x ≈ 2.0+1.0im + op(0.5+0.2im)
end
for op in [SWAP, HADAMARD]
x, y = 2.0+1.0im, 0.5+0.2im
@instr op(x, y)
@test x ≈ op(2.0+1.0im, 0.5+0.2im)[1]
@test y ≈ op(2.0+1.0im, 0.5+0.2im)[2]
end
for op in [NEG, INC, DEC]
x = 2.0+1.0im
@instr op(x)
@test x ≈ op(2.0+1.0im)
end
for op in [^, /, +, -]
x, y, z = 2.0+1.0im, 0.5+0.2im, 0.8-2.0im
@instr PlusEq(op)(x, y, z)
@test x ≈ 2.0+1.0im + op(0.5+0.2im, 0.8-2.0im)
end
end
================================================
FILE: test/instructs.jl
================================================
using NiLang
using Test
@testset "identity" begin
x, y = 0.2, 0.5
@instr x += identity(y)
@test x == 0.7 && y==0.5
end
@testset "*, /" begin
x, y, out = 2.0, 2.0, 1.0
@instr out += x * y
@test x == 2.0 && y == 2.0 && out == 5.0
x, y, out = 2.0, 2.0, 1.0
@instr out += x / y
@test x == 2.0 && y == 2.0 && out == 2.0
out = Fixed43(0.0)
x = 1
@instr out += x/2
@test out === Fixed43(0.5)
@instr out -= x/2
@test out === Fixed43(0.0)
end
@testset "SWAP" begin
x, y = 1, 2
@instr SWAP(x, y)
@test x == 2 && y == 1
end
@testset "NEG" begin
x = 0.3
@instr NEG(x)
@test x == -0.3
@test check_inv(NEG, (x,))
end
@testset "INV" begin
x = 0.2
@instr INV(x)
@test x == 5.0
@test check_inv(INV, (x,))
end
@testset "AddConst" begin
x = 0.3
@instr AddConst(4.0)(x)
@test x == 4.3
@test check_inv(AddConst(4.0), (x,))
x = 0.3
@instr SubConst(4.0)(x)
@test x == -3.7
@test check_inv(SubConst(4.0), (x,))
end
@testset "FLIP" begin
x = false
@instr FLIP(x)
@test x == true
@test check_inv(FLIP, (x,))
end
@testset "ROT" begin
x, y, θ = 0.0, 1.0, π
@test check_inv(ROT, (x, y, θ); verbose=true)
@test check_inv(IROT, (x, y, θ); verbose=true)
end
@testset "INC, DEC" begin
x = Int32(2)
@instr INC(x)
@test x === Int32(3)
@instr DEC(x)
@test x === Int32(2)
end
@testset "HADAMARD" begin
x = 0.5
y = 0.8
@test check_inv(HADAMARD, (x, y))
end
@testset "dataviews" begin
@i function f(z, y, x)
y += cos(x |> INV)
z += tan(y |> AddConst(4.0))
z += y * (x |> NEG |> SubConst(0.5) |> INV)
z += sin(x |> INV)
end
@test check_inv(f, (0.2, 0.5, 0.8))
end
@testset "fixed point arithmetics" begin
for op in [exp, log, sin, sinh, asin, cos, cosh, acos, tan, tanh, atan]
x, y = Fixed43(2.0), Fixed43(0.5)
@instr x += op(y)
@test x ≈ 2.0 + op(0.5)
end
for op in [SWAP, HADAMARD]
x, y = Fixed43(2.0), Fixed43(0.5)
@instr op(x, y)
@test x ≈ op(2.0, 0.5)[1]
@test y ≈ op(2.0, 0.5)[2]
end
for op in [NEG, INC, DEC]
x = Fixed43(2.0)
@instr op(x)
@test x ≈ op(2.0)
end
for op in [^, /]
x, y, z = Fixed43(2.0), Fixed43(0.5), Fixed43(0.8)
@instr PlusEq(op)(x, y, z)
@test x ≈ 2.0 + op(0.5, 0.8)
end
end
@testset "additive identity" begin
struct TestAdd{T}
x::T
y::Vector{T}
end
@test getfield.(PlusEq(identity)(TestAdd(1, [2]), TestAdd(10, [2])), :x) == (TestAdd(11, [4]).x, TestAdd(10, [2]).x)
@test getfield.(PlusEq(identity)(TestAdd(1, [2]), TestAdd(10, [2])), :y) == (TestAdd(11, [4]).y, TestAdd(10, [2]).y)
end
================================================
FILE: test/macros.jl
================================================
using Test, NiLang
using NiLang: auto_alloc, auto_expand
# Allocation rule used by @auto_alloc: the accumulator of i_sum over an
# array is a zero of the element type.
NiLang.alloc(::typeof(NiLang.i_sum), x::AbstractArray{T}) where T = zero(T)
# auto_alloc / auto_expand rewrite irreversible-looking expressions into
# allocate-then-accumulate reversible statement sequences.
@testset begin
@test auto_alloc(:(y = exp(x))) == Expr(:block, :(y ← $alloc(exp, x)), :(y += exp(x)))
ex1 = :(PlusEq(sin)(z, sin(x + 2y)))
ex2 = auto_expand(ex1)
@test length(ex2.args) == 13
@i function test(x, y, z)
#@auto_expand z += sin(x + 2y)
@invcheckoff @auto_expand z += sin(x + 2y)
end
x, y, z = 1.0, 2.0, 3.0
@test test(x, y, z)[3] == z + sin(x + 2y)
@test check_inv(test, (x, y, z))
@i function test(x, y, z, a)
@auto_expand PlusEq(sin)(Complex{}(x, y), Complex{}(z, sin(a)))
end
x, y, z, a = 1.0, 2.0, 3.0, 4.0
@test Complex(test(x, y, z, a)[1:2]...) == 1+im*y + sin(z+im*sin(a))
@test check_inv(test, (x, y, z, a))
@i function test2(y, x)
@routine begin
@auto_alloc i_sum(z, x)
end
y += z
~@routine
end
@test test2(1.0, [2,3.0])[1] == 6.0
@test check_inv(test2, (1.0, [2,3.0]))
end
================================================
FILE: test/runtests.jl
================================================
using NiLang
using Test
@testset "vars.jl" begin
include("vars.jl")
end
@testset "utils.jl" begin
include("utils.jl")
end
@testset "wrappers.jl" begin
include("wrappers.jl")
end
@testset "instructs.jl" begin
include("instructs.jl")
end
@testset "complex.jl" begin
include("complex.jl")
end
@testset "ulog.jl" begin
include("ulog.jl")
end
@testset "macros.jl" begin
include("macros.jl")
end
@testset "autobcast.jl" begin
include("autobcast.jl")
end
@testset "autodiff" begin
include("autodiff/autodiff.jl")
end
@testset "stdlib" begin
include("stdlib/stdlib.jl")
end
================================================
FILE: test/stdlib/base.jl
================================================
using NiLang, NiLang.AD
using Test
@testset "sqdistance" begin
@test i_sqdistance(0.0, [1.0, 0.0], [0.0, 1.0])[1] == 2.0
end
================================================
FILE: test/stdlib/bennett.jl
================================================
using Test
using NiLang, NiLang.AD
@testset "integrate" begin
FT = Float64
n = 100
h = FT(π/n)
dt = FT(0.01)
α = FT(4e-2)
@i function step!(dest::AbstractArray{T}, src::AbstractArray{T}; α, h, dt) where T
n ← length(dest)
@invcheckoff for i=1:n
@routine begin
@zeros T cum g h2 αcum
cum += src[mod1(i+1, n)] + src[mod1(i-1, n)]
cum -= 2*src[i]
αcum += cum * α
h2 += h^2
g += αcum/h2
end
dest[i] += src[i]
dest[i] += dt*g
~@routine
end
n → length(dest)
end
x = zeros(FT, n)
x[n÷2] = 1
#state = Dict{Int,Vector{FT}}()
k = 4
N = 100
x_last = NiLang.direct_emulate(step!, FT.(x); N=N, α=α, h=h, dt=dt)
log1 = NiLang.BennettLog()
log2 = NiLang.BennettLog()
log3 = NiLang.BennettLog()
_, x_last_b, _ = bennett(step!, zero(FT.(x)), FT.(x); k=k, N=N, α=α, h=h, dt=dt, logger=log1)
_, x_last_b2 = bennett!(step!, Dict(1=>FT.(x)); k=k, N=N, α=α, h=h, dt=dt, logger=log2)
_, x_last_b3 = bennett!([step! for _=1:100], Dict(1=>FT.(x)); k=k, N=N, α=α, h=h, dt=dt, logger=log3)
@test sum(x_last_b) ≈ 1
@test x_last ≈ x_last_b
@test x_last ≈ x_last_b2[N+1]
@test x_last ≈ x_last_b3[N+1]
@test length(log1.fcalls) > length(log2.fcalls)
@test length(log1.fcalls) < 2*length(log2.fcalls)
@test length(log3.fcalls) == length(log2.fcalls)
@i function loss(out, step, y, x; kwargs...)
bennett((@skip! step), y, x; kwargs...)
out += y[n÷2]
end
@i function loss2(out, step, d; N, kwargs...)
bennett!((@skip! step), d; N, kwargs...)
out += d[N+1][n÷2]
end
_, _, _, gx = NiLang.AD.gradient(loss, (0.0, step!, zero(x), copy(x)); iloss=1, k=k, N=N, α=α, h=h, dt=dt)
_, _, gx2 = NiLang.AD.gradient(loss2, (0.0, step!, Dict(1=>copy(x))); iloss=1, k=k, N=N, α=α, h=h, dt=dt)
x_last_2 = NiLang.direct_emulate(step!, (x2=copy(x); x2[n÷2]+=1e-5; FT.(x2)); N=N, α=α, h=h, dt=dt)
@test gx[n÷2] ≈ (x_last_2 - x_last)[n÷2]/1e-5
@test gx2[1][n÷2] ≈ (x_last_2 - x_last)[n÷2]/1e-5
end
================================================
FILE: test/stdlib/blas.jl
================================================
using Test
using LinearAlgebra
using NiLang, NiLang.AD
# Tests for the reversible BLAS-like primitives i_norm2 and i_dot.
@testset "i_norm2, dot" begin
    # Complex inputs: i_norm2 accumulates |vec|^2 into `out` reversibly.
    out = 0.0im
    vec = [1.0im, 2.0, 3.0]
    vec2 = [1.0, 2.0im, 5.0]
    @instr i_norm2(out, vec)
    @test out ≈ norm(vec)^2
    @test check_inv(i_norm2, (out, vec))
    # Complex dot product.
    out = 0.0im
    vec = [1.0im, 2.0, 3.0]
    vec2 = [1.0, 2.0im, 5.0]
    @instr i_dot(out, vec, vec2)
    @test out ≈ dot(vec, vec2)
    @test check_inv(i_dot, (out, vec, vec2))
    # Real inputs: gradient checks are performed on real-valued arguments.
    out = 0.0
    vec = [1.0, 2.0, 3.0]
    vec2 = [1.0, 2.0, 5.0]
    @test check_grad(i_norm2, (out, vec); verbose=true, iloss=1)
    out = 0.0
    @instr i_dot(out, vec, vec2)
    @test out ≈ dot(vec, vec2)
    @test check_inv(i_dot, (out, vec, vec2))
    @test check_grad(i_dot, (0.0, vec, vec2); verbose=true, iloss=1)
    # Matrix-column slices should work with i_dot as well.
    m = randn(4,4)
    n = randn(4,4)
    out = 0.0
    @instr i_dot(out, m[:,2], n[:,4])
    @test out ≈ dot(m[:,2], n[:,4])
    @test check_inv(i_dot, (out, m[:,2], n[:,4]))
    # Fixed: this line previously duplicated the vector-input gradient check
    # above; it now checks the gradient on the column slices used in this
    # section, matching the forward/inverse checks just before it.
    @test check_grad(i_dot, (0.0, m[:,2], n[:,4]); verbose=true, iloss=1)
end
"""
    naive_umm!(x, params)

Irreversible reference implementation of the unitary matrix multiplication:
sweeps over adjacent pairs of `x`, applying one two-element rotation `rot`
per angle in `params`, consuming the angles in order. Mutates `x` in place.
"""
function naive_umm!(x, params)
    n = size(x, 1)
    idx = 0
    # same visitation order as the reversible i_umm!: for each column, rotate
    # pairs from the bottom of the vector up to that column.
    for col in 1:n, row in n-1:-1:col
        idx += 1
        x[row], x[row+1] = rot(x[row], x[row+1], params[idx])
    end
end
"""
    inv_naive_umm!(x, params)

Exact inverse of [`naive_umm!`](@ref): replays the rotations in reverse
order with negated angles, starting from the last of the
`n*(n-1)/2` parameters. Mutates `x` in place.
"""
function inv_naive_umm!(x, params)
    n = size(x, 1)
    idx = n * (n - 1) ÷ 2  # total number of rotations applied by the forward pass
    for col in n:-1:1, row in col:n-1
        x[row], x[row+1] = rot(x[row], x[row+1], -params[idx])
        idx -= 1
    end
end
# Round-trip check: the hand-written inverse undoes the forward sweep.
@testset "naive unitary" begin
    v = randn(200)
    # one angle per rotation: 200*199/2 = 100*199 parameters
    angles = randn(100*199) .* 2π
    v0 = copy(v)
    angles0 = copy(angles)
    naive_umm!(v, angles)
    inv_naive_umm!(v, angles)
    @test angles ≈ angles0  # parameters must be untouched
    @test v ≈ v0            # state restored up to floating-point error
end
# Check the reversible i_umm! against the irreversible reference and verify
# that its inverse program restores the inputs.
# Fixed: removed the unused local `Nx = length(x)`.
@testset "unitary" begin
    x = randn(20)
    # one rotation angle per adjacent pair sweep: 20*19/2 = 10*19 parameters
    params = randn(10*19) * 2π
    x0 = copy(x)
    params0 = copy(params)
    @instr i_umm!(x, params)
    # the reversible result must match the naive implementation
    x1 = copy(x0)
    params1 = copy(params0)
    naive_umm!(x1, params1)
    @test params ≈ params1
    @test x ≈ x1
    # the automatically generated inverse restores the original inputs
    @instr (~i_umm!)(x, params)
    @test params ≈ params0
    @test x ≈ x0
    @test check_inv(i_umm!, (x, params))
end
================================================
FILE: test/stdlib/linalg.jl
================================================
using Test
using NiLang, NiLang.AD
using LinearAlgebra
using Random
@testset "inv" begin
Random.seed!(2)
id = [1 0 0; 0 1 0; 0 0 1.0]
# reversible in-place matrix inverse round-trips
@test check_inv(i_inv!, (randn(3, 3), randn(3, 3)))
# PlusEq(f) is the reversible instruction `out += f(args)`
@test check_inv(PlusEq(det), (0.3, randn(3, 3)))
# shift by the identity so the matrix is safe for logdet
@test check_inv(PlusEq(logdet), (0.3, rand(3, 3) .+ id))
@test check_grad(PlusEq(det), (0.3, randn(3, 3)), iloss=1)
@test check_grad(PlusEq(logdet), (0.3, rand(3, 3) .+ id), iloss=1)
# gradient through i_inv! composed with reading one matrix entry
@i function loss(out!, y, A)
i_inv!(y, A)
out! += y[1,1]
end
@test check_grad(loss, (0.0, randn(3, 3), randn(3, 3)); iloss=1)
end
# i_affine! computes the affine map y! += A*x + b reversibly.
@testset "affine" begin
    Random.seed!(2)
    W = randn(5, 5)
    bias = randn(5)
    input = randn(5)
    output = zeros(5)
    # i_affine! returns its updated arguments; slot 1 is the output vector
    @test i_affine!(output, W, bias, input)[1] ≈ W * input + bias
end
================================================
FILE: test/stdlib/mapreduce.jl
================================================
using NiLang, Test
@testset "filter and mapfoldl" begin
# reversible filter + fold: `y` collects the entries of `x` passing the
# predicate, then `z` accumulates exp of each collected entry.
@i function f(z, y, x)
i_filter!((@skip! x -> x < 0), y, x)
i_mapfoldl((@skip! exp), (@skip! PlusEq(identity)), z, y)
end
# negatives of [-1, -0.5, 3] are kept, so the fold yields exp(-1)+exp(-0.5)
@test f(0.0, Float64[], [-1, -0.5, 3])[1] == exp(-0.5) + exp(-1)
end
================================================
FILE: test/stdlib/nnlib.jl
================================================
using Test, Random
using NiLang, NiLang.AD
"""
    _sce(x, p)

Irreversible reference implementation of softmax cross entropy along the
last dimension of `x`, weighted by `p`. Subtracts the per-slice maximum
before exponentiating so `exp` never overflows. Returns an array with the
last dimension dropped.
"""
function _sce(x::AbstractArray{T,N}, p) where {T,N}
    shifted = x .- maximum(x; dims=N)   # stabilize: exponentiate nonpositive values only
    expx = exp.(shifted)
    logZ = log.(sum(expx; dims=N))      # log partition function per slice
    return dropdims(sum((logZ .- shifted) .* p; dims=N), dims=N)
end
# Reversible softmax cross entropy against the `_sce` reference.
# Fixed: removed a leftover debugging statement (`@show Z`) that printed
# noise on every test run.
@testset "softmax_crossentropy" begin
    Random.seed!(2)
    x = randn(10)
    x0 = copy(x)
    p = randn(10); p = p./maximum(p)
    res = _sce(x, p)
    # ancilla/output slots filled by the reversible kernel:
    # argmax index, max value, partition function, loss
    imax = 0
    Z = 0.0
    out = 0.0
    xmax = 0.0
    x_ = x
    p_ = p
    @instr i_softmax_crossentropy(x_, p_, imax, xmax, Z, out)
    @test isapprox(imax, argmax(x0), atol=1e-8)
    @test isapprox(out, res[], atol=1e-8)
    # running the inverse restores the ancillas, then check reversibility
    # and the gradient w.r.t. the loss slot (argument 6)
    @instr (~i_softmax_crossentropy)(x_, p_, imax, xmax, Z, out)
    args = x_, p_, imax, xmax, Z, out
    @test check_inv(i_softmax_crossentropy, args)
    args = x_, p_, imax, xmax, Z, out
    @test check_grad(i_softmax_crossentropy, args; iloss=6, verbose=true)
end
@testset "logsumexp" begin
    # irreversible reference: max-shifted log-sum-exp for numerical stability
    function logsumexp2(v)
        m = maximum(v)
        return log(sum(exp, v .- m)) + m
    end
    xs = randn(100)
    # i_ascending! records the running maxima; the last entry is the global max
    @test i_ascending!(Float64[], Int[], xs)[1][end] == maximum(xs)
    @test i_logsumexp(0.0, 0.0, Float64[], Int[], xs)[1] ≈ logsumexp2(xs)
    @test check_inv(i_logsumexp, (0.0, 0.0, Float64[], Int[], xs))
end
================================================
FILE: test/stdlib/sparse.jl
================================================
using NiLang
using SparseArrays
using Test, Random
# Reversible i_dot agrees with SparseArrays' dot on sparse matrices.
@testset "dot" begin
    Random.seed!(2)
    a = sprand(10, 10, 0.3)
    b = sprand(10, 10, 0.3)
    # i_dot returns the updated arguments; slot 1 is the accumulated result
    @test i_dot(0.0, a, b)[1] ≈ SparseArrays.dot(a, b)
end
# Reversible i_mul! agrees with the five-argument mul! (y = α*A*x + β*y).
@testset "mul!" begin
    Random.seed!(2)
    A = sprand(10, 10, 0.3)
    x = randn(10)
    y0 = zero(x)
    expected = SparseArrays.mul!(copy(y0), A, x, 0.5, 1)
    @test expected ≈ i_mul!(copy(y0), A, x, 0.5, 1)[1]
end
================================================
FILE: test/stdlib/statistics.jl
================================================
import Statistics
using Test, Random
using NiLang, NiLang.AD
using Distributions
@testset "statistics" begin
x = randn(100)
y = randn(100)
# i_mean_sum returns (mean, sum, x); slot 1 holds the mean
@test i_mean_sum(0.0, 0.0, x)[1] ≈ Statistics.mean(x)
info = VarianceInfo(Float64)
# NOTE(review): judging from the RHS, VarianceInfo fields are
# (variance, sum of squared deviations = var*(n-1), mean, sum) — confirm
# against the VarianceInfo definition.
@test almost_same(i_var_mean_sum(info, copy(x))[1], VarianceInfo(Statistics.var(x), Statistics.var(x)*99, Statistics.mean(x), sum(x)))
# running the inverse on the outputs restores the original arguments
@test almost_same((~i_var_mean_sum)(i_var_mean_sum(info, copy(x))...), (info, x))
# i_cor_cov returns (correlation, covariance, x, y)
@test almost_same(i_cor_cov(0.0, 0.0, copy(x), copy(y)), (Statistics.cor(x,y), Statistics.cov(x,y), x, y))
@test almost_same((~i_cor_cov)(i_cor_cov(0.0, 0.0, copy(x), copy(y))...), (0.0, 0.0, x, y))
end
# Reversible normal log-pdf against Distributions.jl.
@testset "normal log pdf" begin
    acc = 0.0
    sample = 1.0
    mu = 0.3
    sigma = 1.5
    # the reversible version accumulates the logpdf into its first slot
    res_ni = i_normal_logpdf(acc, sample, mu, sigma)
    res_ref = logpdf(Normal(mu, sigma), sample)
    @test res_ni[1] ≈ res_ref
    @test check_inv(i_normal_logpdf, (acc, sample, mu, sigma))
end
================================================
FILE: test/stdlib/stdlib.jl
================================================
# Aggregate runner for the stdlib test suite: each included file is a
# self-contained set of testsets with its own `using` statements.
include("base.jl")
include("blas.jl")
include("linalg.jl")
include("statistics.jl")
include("nnlib.jl")
include("sparse.jl")
include("mapreduce.jl")
include("bennett.jl")
================================================
FILE: test/ulog.jl
================================================
using NiLang, NiLang.AD
using Test, Random
using NiLangCore: default_constructor
@testset "basic instructions, ULogarithmic" begin
# ULogarithmic stores a nonnegative number by its logarithm;
# default_constructor builds one directly from the raw log field, so
# multiplying values adds their logs (1 + 2 == 3 below).
x, y = default_constructor(ULogarithmic{Int}, 1),
default_constructor(ULogarithmic{Int}, 2)
@instr x *= y
@test x == default_constructor(ULogarithmic{Int}, 3)
@test y == default_constructor(ULogarithmic{Int}, 2)
# gaussian_log is the log-domain addition primitive; the expected value
# below shows it computes log(1 + exp(x)) added onto the first argument
@test PlusEq(gaussian_log)(1.0, 2.0) == (1.0+log(1+exp(2.0)), 2.0)
# reversible x *= y + z (log-domain addition)
x, y, z = default_constructor(ULogarithmic{Float64}, 7.0),
default_constructor(ULogarithmic{Float64}, 2.0),
default_constructor(ULogarithmic{Float64}, 3.0)
@instr x *= y + z
@test check_inv(MulEq(+), (x, y, z))
@test x.log ≈ log(exp(7.0) * (exp(2.0) + exp(3.0)))
# reversible x *= y - z (log-domain subtraction; here y > z)
x, y, z = default_constructor(ULogarithmic{Float64}, 7.0),
default_constructor(ULogarithmic{Float64}, 5.0),
default_constructor(ULogarithmic{Float64}, 3.0)
@instr x *= y - z
@test x.log ≈ log(exp(7.0) * (exp(5.0) - exp(3.0)))
# reversible x *= y^3.4 (the power scales y's log before adding)
x, y, z = default_constructor(ULogarithmic{Float64}, 7.0),
default_constructor(ULogarithmic{Float64}, 5.0),
default_constructor(ULogarithmic{Float64}, 3.0)
@instr x *= y^3.4
@test x.log ≈ log(exp(5.0)^3.4 * exp(7.0))
# multiplying by a plain scalar lifts it into the log domain first
x, y, z = default_constructor(ULogarithmic{Float64}, 7.0),
default_constructor(ULogarithmic{Float64}, 5.0),
default_constructor(ULogarithmic{Float64}, 3.0)
@instr x *= 3
@test x.log ≈ log(exp(7.0) * 3)
end
@testset "error on += and -=" begin
# ULogarithmic values are meant to be combined with *= in the log domain;
# a direct += has no reversible method, so it must raise MethodError in
# both the forward and the inverse program.
@i function f(x::ULogarithmic)
x += ULogarithmic(3.0)
end
@test_throws MethodError f(ULogarithmic(2.0))
@test_throws MethodError (~f)(ULogarithmic(2.0))
end
================================================
FILE: test/utils.jl
================================================
using NiLang
using Test
@testset "vec dataview" begin
# `y |> vec` / `vec(y)` act as reversible flat views into the matrix `y`
@i function f(x::AbstractVector, y::AbstractMatrix)
x .+= (y |> vec)
vec(y)[5] += x[4]
end
x = zeros(25)
y = ones(5,5)
# expected result: after the broadcast x[4] == 1, so only linear index 5
# of y is incremented (1.0 -> 2.0)
z = ones(5,5)
z[5] = 2.0
@instr f(x, y)
@test y == z
end
================================================
FILE: test/vars.jl
================================================
using Test, NiLang, NiLangCore
@testset "@zeros" begin
# @zeros T a b c expands to one reversible allocation (`←`) per variable,
# each initialized with zero(T); rmlines strips LineNumberNodes so the
# expression comparison is purely structural
@test (@macroexpand @zeros Float64 a b c) == :(begin
a ← zero(Float64)
b ← zero(Float64)
c ← zero(Float64)
end) |> NiLangCore.rmlines
# @ones is the one(T)-initialized counterpart
@test (@macroexpand @ones Float64 a b c) == :(begin
a ← one(Float64)
b ← one(Float64)
c ← one(Float64)
end) |> NiLangCore.rmlines
end
================================================
FILE: test/wrappers.jl
================================================
using NiLang, Test
@testset "partial" begin
# Partial{:im} wraps a number, exposing the chosen field (here the
# imaginary part) as its `value`
x = Partial{:im}(3+2im)
# NOTE(review): presumably a smoke test that printing a Partial does not
# error — confirm before removing
println(x)
@test x === Partial{:im,Complex{Int64},Int64}(3+2im)
@test value(x) == 2
# chfield writes through the `value` view, updating only the wrapped field
@test chfield(x, value, 4) == Partial{:im}(3+4im)
@test zero(x) == Partial{:im}(0.0+0.0im)
# ~Partial{:im} is the inverse constructor: it unwraps the original number
@test (~Partial{:im})(x) == 3+2im
end
@testset "value" begin
x = 1.0
# plain numbers are their own value
@test value(x) === 1.0
# @assign writes through a dataview: assigning to `x |> value` rebinds x
@assign (x |> value) 0.2
@test x == 0.2
end
# Minimal IWrapper subtype used to exercise the generic wrapper interface
# in the testset below.
struct NiTypeTest{T} <: IWrapper{T}
x::T
g::T
end
# convenience constructor: second field starts at zero
NiTypeTest(x) = NiTypeTest(x, zero(x))
# expose the fields through NiLang's field-view mechanism so that `value`
# and `gg` can be used with chfield
@fieldview NiLang.value(invtype::NiTypeTest) = invtype.x
@fieldview gg(invtype::NiTypeTest) = invtype.g
@testset "inv type" begin
it = NiTypeTest(0.5)
# IWrapper forwards numeric traits (eps, comparisons, ≈) to the wrapped value
@test eps(typeof(it)) === eps(Float64)
@test value(it) == 0.5
@test it ≈ NiTypeTest(0.5)
@test it > 0.4
@test it < NiTypeTest(0.6)
@test it < 7
@test 0.4 < it
@test 7 > it
# chfield replaces the field selected by a view function or Val(:name)
@test chfield(it, value, 0.3) == NiTypeTest(0.3)
it = chfield(it, Val(:g), 0.2)
# almost_same tolerates tiny float differences but distinguishes 1.0 from 1
@test almost_same(NiTypeTest(0.5+1e-15), NiTypeTest(0.5))
@test !almost_same(NiTypeTest(1.0), NiTypeTest(1))
it = NiTypeTest(0.5)
@test chfield(it, gg, 0.3) == NiTypeTest(0.5, 0.3)
end