[
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2020 Sammy\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# BayesBook\n\n「機械学習スタートアップシリーズ ベイズ推論による機械学習入門」のソースコードをアップしています。\n* http://www.kspub.co.jp/book/detail/1538320.html\n* 正誤表（第１～３刷まで） https://github.com/sammy-suyama/BayesBook/blob/master/pdf/seigo.pdf\n* 正誤表（第４刷まで） https://github.com/sammy-suyama/BayesBook/blob/master/pdf/seigo_v4.pdf\n\nソースコードはJuliaで書かれています。（推奨Vesion:0.6.0）\n* The Julia Language: http://julialang.org/\n* Julia Documentation: http://docs.julialang.org/\n\nグラフの描画やテストデータのダウンロードに一部Pythonライブラリを利用しています。\n* Python: https://www.python.org/\n* Matplotlib: https://matplotlib.org/\n* scikit-learn: http://scikit-learn.org/\n\n上記の環境構築が煩わしい場合にはDockerfileも用意しています．\n* Docker: https://docs.docker.com/ \n"
  },
  {
    "path": "docker/Dockerfile",
    "content": "FROM python:latest\n\n# Update\nRUN apt-get update\n\n# Install matplotlib\nRUN pip3 install matplotlib scipy scikit-learn notebook\n\n# Install libraries\nRUN apt-get install -y sudo hdf5-tools libzmq3\n\n# Install julia 0.6.0\nRUN wget https://julialang-s3.julialang.org/bin/linux/x64/0.6/julia-0.6.0-linux-x86_64.tar.gz && \\\n    tar -xzf julia-0.6.0-linux-x86_64.tar.gz && \\\n    ln -s /julia-903644385b/bin/julia /usr/local/bin/julia\n\n# Set the working directory to /work\nWORKDIR /work\n\n# Add julia packages\nADD add_packages.jl /work\nRUN julia add_packages.jl\n\n# Download source codes\nRUN git clone https://github.com/sammy-suyama/BayesBook.git\n\n# Make port 8888 available to the world outside this container\nEXPOSE 8888\n\n# Start jupyter notebook\nCMD jupyter notebook --allow-root --port=8888 --ip=0.0.0.0\n"
  },
  {
    "path": "docker/README.md",
    "content": "# DockerからJupyter notebookを実行する\n\nJuliaやPythonの実行環境構築が煩わしい場合は、Dockerを使ってデモスクリプトをJupyter notebook上で動作させることができます。\nDockerのインストールに関しては公式サイトを参考ください。\n* https://docs.docker.com/engine/installation/\n\n`Dockerfile`の置いてあるディレクトリで、イメージを作成・実行します。\n\n    $ docker build -t bayesbook .\n    $ docker run -p 8888:8888 bayesbook\n\n"
  },
  {
    "path": "docker/add_packages.jl",
    "content": "\nPkg.update()\nPkg.add(\"PyPlot\")\nPkg.add(\"StatsFuns\")\nPkg.add(\"SpecialFunctions\")\nPkg.add(\"Distributions\")\nPkg.add(\"PDMats\")\nPkg.add(\"ProgressMeter\")\nPkg.add(\"DataFrames\")\nPkg.add(\"HDF5\")\nPkg.add(\"JLD\")\nPkg.add(\"IJulia\")\n"
  },
  {
    "path": "src/BayesNeuralNet.jl",
    "content": "\"\"\"\nVariational inference for Bayesian neural network\n\"\"\"\nmodule BayesNeuralNet\nusing Distributions\n\nexport sample_data_from_prior, sample_data_from_posterior\nexport VI\n\nfunction sigmoid(x)\n    return 1.0 / (1.0 + exp.(-x[1]))\nend\n\nfunction rho2sig(rho)\n    return log.(1 + exp.(rho))\nend\n\nfunction compute_df_dmu(mu, rho, W)\n    return (W - mu) ./ rho2sig(rho).^2\nend\n\nfunction compute_df_drho(Y, X, mu, rho, W)\n    return -0.5*((W - mu).^2 - rho2sig(rho).^2) .* compute_dprec_drho(rho)\nend\n\nfunction compute_dprec_drho(rho)\n    return 2 * rho2sig(rho) .^ (-3) .* (1 ./ (1+exp.(rho))).^2 .* (1 ./ (1+exp.(-rho)))\nend\n\nfunction compute_df_dw(Y, X, sigma2_y, sigma2_w, mu1, rho1, W1, mu2, rho2, W2)\n    M, N = size(X)\n    Y_err1 = zeros(size(W1)) # MxK\n    Y_err2 = zeros(size(W2)) # KxD\n\n    for n in 1 : N\n        Z = tanh.(W1'*X[:,n]) # Kx1\n        Y_est = W2'*Z\n        # 2nd unit, Dx1\n        delta2 = Y_est - Y[n]\n        \n        # 1st unit, KxD\n        delta1 = diagm(1 - Z.^2) * W2 * delta2\n        \n        Y_err1 += X[:,n] * delta1'\n        Y_err2 += Z * delta2'\n    end\n    df_dw1 = W1/sigma2_w + (mu1 - W1) ./ rho2sig(rho1).^2 + Y_err1 / sigma2_y\n    df_dw2 = W2/sigma2_w + (mu2 - W2) ./ rho2sig(rho2).^2 + Y_err2 / sigma2_y\n    return df_dw1, df_dw2\nend\n\n\"\"\"\nSample data given prior and inputs.\n\"\"\"\nfunction sample_data_from_prior(X, sigma2_w, sigma2_y, D, K)\n    M, N = size(X)\n\n    W1 = sqrt(sigma2_w) * randn(M, K)\n    W2 = sqrt(sigma2_w) * randn(K, D)\n    \n    # sample function\n    Y = [W2'* tanh.(W1'X[:,n]) for n in 1 : N]\n\n    # sample data\n    Y_obs = [W2'* tanh.(W1'X[:,n]) + sqrt(sigma2_y)*randn(D) for n in 1 : N]\n\n    return Y_obs, Y, W1, W2\nend\n\n\"\"\"\nSample data given posterior and inputs.\n\"\"\"\nfunction sample_data_from_posterior(X, mu1, rho1, mu2, rho2, sigma2_y, D)\n    N = size(X, 2)\n    ep1 = randn(size(mu1))\n    W1_tmp = mu1 + log.(1 + exp.(rho1)) .* ep1\n    ep2 = randn(size(mu2))\n    W2_tmp = mu2 + log.(1 + exp.(rho2)) .* ep2    \n    Y_est = [W2_tmp'* tanh.(W1_tmp'X[:,n]) for n in 1 : N]\n    Y_obs = [W2_tmp'* tanh.(W1_tmp'X[:,n]) + sqrt(sigma2_y)*randn(D)  for n in 1 : N]\n    return Y_est, Y_obs\nend\n\n\"\"\"\nCompute variational parameters.\n\"\"\"\nfunction VI(Y, X, sigma2_w, sigma2_y, K, alpha, max_iter)\n    M, N = size(X)\n    D = length(Y[1])\n\n    # initialize\n    mu1 = randn(M, K)\n    rho1 = randn(M, K)\n    mu2 = randn(K, D)\n    rho2 = randn(K, D)\n\n    for i in 1 : max_iter\n        # sample\n        ep1 = randn(size(mu1))\n        W1_tmp = mu1 + log.(1 + exp.(rho1)) .* ep1\n        ep2 = randn(size(mu2))\n        W2_tmp = mu2 + log.(1 + exp.(rho2)) .* ep2\n        \n        # calc error\n        df_dw1, df_dw2 = compute_df_dw(Y, X, sigma2_y, sigma2_w, mu1, rho1, W1_tmp, mu2, rho2, W2_tmp)\n        \n        # 1st unit\n        df_dmu1 = compute_df_dmu(mu1, rho1, W1_tmp)\n        df_drho1 = compute_df_drho(Y, X, mu1, rho1, W1_tmp)\n        d_mu1 = df_dw1 + df_dmu1\n        d_rho1 = df_dw1 .* (ep1 ./ (1+exp.(-rho1))) + df_drho1\n        mu1 = mu1 - alpha * d_mu1\n        rho1 = rho1 - alpha * d_rho1 \n        \n        # 2nd unit\n        df_dmu2 = compute_df_dmu(mu2, rho2, W2_tmp)\n            df_drho2 = compute_df_drho(Y, X, mu2, rho2, W2_tmp)\n        d_mu2 = df_dw2 + df_dmu2\n        d_rho2 = df_dw2 .* (ep2 ./ (1+exp.(-rho2))) + df_drho2\n        mu2 = mu2 - alpha * d_mu2\n        rho2 = rho2 - alpha * d_rho2\n    end\n    return mu1, rho1, mu2, 
rho2\nend\n\nend\n\n"
  },
  {
    "path": "src/DimensionalityReduction.jl",
    "content": "\"\"\"\nVariational inference for Bayesian DimensionalityReduction\n\"\"\"\nmodule DimensionalityReduction\n\nusing Distributions\n#using ProgressMeter\n\nexport DRModel\nexport sample_data, VI\n\n####################\n## Types\nstruct DRModel\n    D::Int\n    M::Int\n    sigma2_y::Float64\n    m_W::Array{Float64, 2} # MxD\n    Sigma_W::Array{Float64, 3} # MxMxD\n    m_mu::Array{Float64, 1} # D\n    Sigma_mu::Array{Float64, 2} # DxD\nend\n\n####################\n## functions\nfunction sqsum(mat::Array{Float64}, idx::Int)\n    return squeeze(sum(mat, idx), idx)\nend\n\n\"\"\"\nSample data given hyperparameters.\n\"\"\"\nfunction sample_data(N::Int, model::DRModel)\n    D = model.D\n    M = model.M\n    W = zeros(M, D)\n    mu = zeros(D)\n    for d in 1 : D\n        W[:,d] = rand(MvNormal(model.m_W[:,d], model.Sigma_W[:,:,d]))\n    end\n    mu = rand(MvNormal(model.m_mu, model.Sigma_mu))\n    \n    Y = zeros(D, N)\n    X = randn(M, N)\n    for n in 1 : N\n        Y[:,n] = rand(MvNormal(W'*X[:,n] + mu, model.sigma2_y*eye(D)))\n    end\n    return Y, X, W, mu\nend\n\nfunction init(Y::Array{Float64, 2}, prior::DRModel)\n    M = prior.M\n    D, N = size(Y)\n    X = randn(M, N)\n    XX = zeros(M, M, N)\n    for n in 1 : N\n        XX[:,:,n] = X[:,n]*X[:,n]' + eye(M)\n    end\n    return X, XX\nend\n\nfunction update_W(Y::Array{Float64, 2}, prior::DRModel, posterior::DRModel,\n                  X::Array{Float64, 2}, XX::Array{Float64, 3})\n    D = prior.D\n    M = prior.M\n    N = size(Y, 2)\n    m_W = zeros(M, D)\n    Sigma_W = zeros(M, M, D)\n    mu = posterior.m_mu\n    for d in 1 : D\n        Sigma_W[:,:,d] = inv(inv(prior.sigma2_y)*sqsum(XX, 3) + inv(prior.Sigma_W[:,:,d]))\n        m_W[:,d] = Sigma_W[:,:,d]*(inv(prior.sigma2_y)*X*(Y[[d],:] - mu[d]*ones(1, N))'\n                                   + inv(prior.Sigma_W[:,:,d])*prior.m_W[:,d])\n    end\n    return DRModel(D, M, prior.sigma2_y, m_W, Sigma_W, posterior.m_mu, posterior.Sigma_mu)\nend\n\nfunction update_mu(Y::Array{Float64, 2}, prior::DRModel, posterior::DRModel,\n                   X::Array{Float64, 2}, XX::Array{Float64, 3})\n    N = size(Y, 2)\n    D = prior.D\n    M = prior.M\n    W = posterior.m_W\n    Sigma_mu = inv(N*inv(prior.sigma2_y)*eye(D) + inv(prior.Sigma_mu))\n    m_mu = Sigma_mu*(inv(prior.sigma2_y)*sqsum(Y - W'*X, 2) + inv(prior.Sigma_mu)*prior.m_mu)\n    return DRModel(D, M, prior.sigma2_y, posterior.m_W, posterior.Sigma_W, m_mu, Sigma_mu)\nend\n\nfunction update_X(Y::Array{Float64, 2}, posterior::DRModel)\n    D, N = size(Y)\n    M = posterior.M\n    \n    W = posterior.m_W\n    WW = zeros(M, M, D)\n    for d in 1 : D\n        WW[:,:,d] = W[:,d]*W[:,d]' + posterior.Sigma_W[:,:,d]\n    end\n    mu = posterior.m_mu\n    X = zeros(M, N)\n    XX = zeros(M, M, N)\n    for n in 1 : N\n        Sigma = inv(inv(posterior.sigma2_y)*sqsum(WW, 3) + eye(M))\n        X[:,n] = inv(posterior.sigma2_y)*Sigma*W*(Y[:,n] - mu)\n        XX[:,:,n] = X[:,n] * X[:,n]' + Sigma\n    end\n    return X, XX\nend\n\nfunction interpolate(mask::BitArray{2}, X::Array{Float64, 2}, posterior::DRModel)\n    Y_est = posterior.m_W'*X + repmat(posterior.m_mu, 1, size(X, 2))\n    return return Y_est[mask]\nend\n\n\"\"\"\nCompute variational posterior distributions.\n\"\"\"\nfunction VI(Y::Array{Float64, 2}, prior::DRModel, max_iter::Int)\n    X, XX = init(Y, prior)\n    mask = isnan.(Y)\n    sum_nan = sum(mask)\n    posterior = deepcopy(prior)\n\n    #progress = Progress(max_iter)\n    for iter in 1 : max_iter\n        # progress\n        
#next!(progress)\n        \n        # Interpolate\n        if sum_nan > 0\n            Y[mask] = interpolate(mask, X, posterior)\n        end\n        \n        # M-step\n        posterior = update_W(Y, prior, posterior, X, XX)\n        posterior = update_mu(Y, prior, posterior, X, XX)\n        \n        # E-step\n        X, XX = update_X(Y, posterior)\n    end\n    \n    return posterior, X\nend\n\nend\n"
  },
  {
    "path": "src/GaussianMixtureModel.jl",
    "content": "\"\"\"\nBayesian Gaussian Mixture Model\n\"\"\"\nmodule GaussianMixtureModel\nusing StatsFuns.logsumexp, SpecialFunctions.digamma\nusing Distributions\nusing PDMats\n\nexport GW, BGMM, Gauss, GMM\nexport sample_GMM, sample_data, winner_takes_all\nexport learn_GS, learn_CGS, learn_VI\n\n####################\n## Types\nstruct GW\n    # Parameters of Gauss Wisahrt distribution\n    beta::Float64\n    m::Vector{Float64}\n    nu::Float64\n    W::Matrix{Float64}\nend\n\nstruct BGMM\n    # Parameters of Bayesian Gaussian Mixture Model \n    D::Int\n    K::Int\n    alpha::Vector{Float64}\n    cmp::Vector{GW}\nend\n\nstruct Gauss\n    # Parameters of Gauss Distribution\n    mu::Vector{Float64}\n    Lambda::Matrix{Float64}\nend\n\nstruct GMM\n    # Parameters of Gauss Mixture Model\n    D::Int\n    K::Int\n    phi::Vector{Float64}\n    cmp::Vector{Gauss}\nend\n\n####################\n## Common functions\n\"\"\"\nSample a GMM given hyperparameters.\n\"\"\"\nfunction sample_GMM(bgmm::BGMM)\n    cmp = Vector{Gauss}()\n    for c in bgmm.cmp\n        Lambda = rand(Wishart(c.nu, PDMats.PDMat(Symmetric(c.W))))\n        mu = rand(MvNormal(c.m, PDMats.PDMat(Symmetric(inv(c.beta*Lambda)))))\n        push!(cmp, Gauss(mu, Lambda))\n    end\n    phi = rand(Dirichlet(bgmm.alpha))\n    return GMM(bgmm.D, bgmm.K, phi, cmp)\nend\n\n\"\"\"\nSample data from a specific GMM model.\n\"\"\"\nfunction sample_data(gmm::GMM, N::Int)\n    X = zeros(gmm.D, N)\n    S = categorical_sample(gmm.phi, N)\n    for n in 1 : N\n        k = indmax(S[:, n])\n        X[:,n] = rand(MvNormal(gmm.cmp[k].mu, PDMats.PDMat(Symmetric(inv(gmm.cmp[k].Lambda)))))\n    end\n    return X, S\nend\n\ncategorical_sample(p::Vector{Float64}) = categorical_sample(p, 1)[:,1]\nfunction categorical_sample(p::Vector{Float64}, N::Int)\n    K = length(p)\n    S = zeros(K, N)\n    S_tmp = rand(Categorical(p), N)\n    for k in 1 : K\n        S[k,find(S_tmp.==k)] = 1\n    end\n    return S\nend\n\nfunction sumdigamma(nu, D)\n    ret = 0.0\n    for d in 1 : D\n        ret += digamma.(0.5*(nu + 1 - d))\n    end\n    return ret\nend\n\nfunction init_S(X::Matrix{Float64}, bgmm::BGMM)\n    N = size(X, 2)\n    K = bgmm.K\n    S = categorical_sample(ones(K)/K, N)    \n    return S\nend\n\nfunction calc_ELBO(X::Array{Float64, 2}, pri::BGMM, pos::BGMM)\n    function logCw(nu, W)\n        D = size(W, 1)\n        return -0.5*nu*logdet(W) - 0.5*nu*D*log.(2) - 0.25*D*(D-1)*log.(pi) - sum([lgamma.(0.5*(nu+1-d)) for d in 1 : D])\n    end\n    \n    ln_expt_S = update_S(pos, X)\n    expt_S = exp.(ln_expt_S)\n    K, N = size(expt_S)\n    D = size(X, 1)\n\n    expt_ln_lkh = 0\n    for k in 1 : K\n        expt_Lambda = pos.cmp[k].nu * pos.cmp[k].W\n        expt_Lambda_mu = pos.cmp[k].nu * pos.cmp[k].W * pos.cmp[k].m\n        expt_mu_Lambda_mu = (pos.cmp[k].nu * pos.cmp[k].m' * pos.cmp[k].W * pos.cmp[k].m)[1] + D/pos.cmp[k].beta\n        expt_ln_Lambda = sumdigamma(pos.cmp[k].nu, D) + D*log.(2) + logdet(pos.cmp[k].W)\n        expt_ln_pi = digamma.(pos.alpha) - digamma.(sum(pos.alpha))\n        for n in 1 : N\n            # <ln p(X|S, mu, Lambda)>\n            expt_ln_lkh += -0.5 * expt_S[k,n]*(trace(X[:,n]*X[:,n]'*expt_Lambda)\n                                               - 2*(X[:,n]'*expt_Lambda_mu)[1]\n                                               + expt_mu_Lambda_mu\n                                               - expt_ln_Lambda\n                                               + D * log.(2*pi)\n                                               )\n            # <ln 
p(S|pi)>\n            expt_ln_lkh += expt_S[k,n]*expt_ln_pi[k]\n        end\n    end\n    # -<ln q(S)>\n    expt_ln_lkh -= sum(expt_S.*ln_expt_S)\n    \n    KL_mu_Lambda = [(0.5*D*(log.(pos.cmp[k].beta) - log.(pri.cmp[k].beta) + pri.cmp[k].beta/pos.cmp[k].beta - pos.cmp[k].nu - 1)\n                     + 0.5*(pos.cmp[k].nu-pri.cmp[k].nu)*(sumdigamma(pos.cmp[k].nu, D) + D*log.(2) + logdet(pos.cmp[k].W))\n                     + logCw(pos.cmp[k].nu, pos.cmp[k].W) - logCw(pri.cmp[k].nu, pri.cmp[k].W)\n                     + 0.5*pos.cmp[k].nu*trace((pri.cmp[k].beta*(pos.cmp[k].m-pri.cmp[k].m)*(pos.cmp[k].m-pri.cmp[k].m)'\n                                                +inv(pri.cmp[k].W))*pos.cmp[k].W)) for k in 1 : K]\n    \n    KL_pi = (lgamma.(sum(pos.alpha)) - lgamma.(sum(pri.alpha))\n             - sum(lgamma.(pos.alpha)) + sum(lgamma.(pri.alpha))\n             + (pos.alpha - pri.alpha)' * (digamma.(pos.alpha) - digamma.(sum(pos.alpha)))\n             )[1]\n\n    VB = expt_ln_lkh - (sum(KL_mu_Lambda) + KL_pi)\n    return VB\nend\n\nfunction add_stats(bgmm::BGMM, X::Matrix{Float64}, S::Matrix{Float64})\n    D = bgmm.D\n    K = bgmm.K\n    sum_S = sum(S, 2)\n    alpha = [bgmm.alpha[k] + sum_S[k] for k in 1 : K]\n    cmp = Vector{GW}()\n\n    XS = X*S';\n    for k in 1 : K\n        beta = bgmm.cmp[k].beta + sum_S[k]\n        m = (1.0/beta)*(vec(X*S[[k],:]') + bgmm.cmp[k].beta*bgmm.cmp[k].m)\n        nu = bgmm.cmp[k].nu + sum_S[k]\n        W = inv(X*diagm(S[k,:])*X'\n                       - beta*m*m'\n                       + bgmm.cmp[k].beta*bgmm.cmp[k].m*bgmm.cmp[k].m'\n                       + inv(bgmm.cmp[k].W))\n        \n        push!(cmp, GW(beta, m, nu, W))\n    end\n    return BGMM(D, K, alpha, cmp)\nend\n\nremove_stats(bgmm::BGMM, X::Matrix{Float64}, S::Matrix{Float64}) = add_stats(bgmm, X, -S)\n\n####################\n## used for Variational Inference\nfunction update_S(bgmm::BGMM, X::Matrix{Float64})\n    D, N = size(X)\n    K = bgmm.K\n    ln_S = zeros(K, N)\n    tmp = zeros(K)\n\n    tmp = NaN * zeros(K)\n    sum_digamma_tmp = digamma.(sum(bgmm.alpha))\n    for k in 1 : K\n        tmp[k] = -0.5*(bgmm.cmp[k].nu*trace(bgmm.cmp[k].m*bgmm.cmp[k].m'*bgmm.cmp[k].W)\n                       + D*(1.0/bgmm.cmp[k].beta)\n                       - (sumdigamma(bgmm.cmp[k].nu, D) + logdet(bgmm.cmp[k].W)))\n        tmp[k] += digamma.(bgmm.alpha[k]) - sum_digamma_tmp\n    end\n    for n in 1 : N\n        tmp_ln_pi = NaN * zeros(K)\n        for k in 1 : K\n            tmp_ln_pi[k] = tmp[k] -0.5*bgmm.cmp[k].nu*trace((X[:,n]*X[:,n]' - 2*bgmm.cmp[k].m*X[:,n]')*bgmm.cmp[k].W)\n        end\n        ln_S[:,n] = tmp_ln_pi - logsumexp(tmp_ln_pi)\n    end\n    return ln_S    \nend\n\n\"\"\"\nPick single states having a max probability.\n\"\"\"\nfunction winner_takes_all(S::Matrix{Float64})\n    S_ret = zeros(size(S))\n    for n in 1 : size(S_ret, 2)\n        idx = indmax(S[:,n])\n        S_ret[idx,n] = 1\n    end\n    return S_ret\nend\n\n####################\n## used for Gibbs Sampling\nfunction sample_S_GS(gmm::GMM, X::Matrix{Float64})\n    D, N = size(X)\n    K = gmm.K\n    S = zeros(K, N)\n\n    tmp = [0.5*logdet(gmm.cmp[k].Lambda) + log.(gmm.phi[k]) for k in 1 : K]\n\n    for n in 1 : N\n        tmp_ln_phi = [-0.5*trace(gmm.cmp[k].Lambda*(X[:,n] - gmm.cmp[k].mu)*(X[:,n] - gmm.cmp[k].mu)') + tmp[k] for k in 1 : K]\n        tmp_ln_phi = tmp_ln_phi - logsumexp(tmp_ln_phi)\n        S[:,n] = categorical_sample(exp.(tmp_ln_phi))\n    end\n    \n    return S\nend\n\n####################\n## used for 
Collapsed Gibbs Sampling\nfunction calc_ln_ST(Xn::Vector{Float64}, gw::GW)\n    # TODO; need to check value?\n    D = size(Xn, 1)\n    W = ((1 - D + gw.nu)*gw.beta / (1 + gw.beta)) * gw.W\n    #ln_lkh = logpdf(MvTDist(1 - D + gw.nu, gw.m, (gw.nu/(gw.nu - 2))*inv(W)), Xn)\n    ln_lkh = logpdf(MvTDist(1 - D + gw.nu, gw.m, PDMats.PDMat(Symmetric(inv(W)))), Xn)\n    return sum(ln_lkh)\nend\n\nfunction sample_Sn(Xn::Vector{Float64}, bgmm::BGMM)\n    ln_tmp = [(calc_ln_ST(Xn, bgmm.cmp[k]) + log.(bgmm.alpha[k])) for k in 1 : bgmm.K]\n    ln_tmp = ln_tmp -  logsumexp(ln_tmp)\n    Sn = categorical_sample(exp.(ln_tmp))\n    return Sn\nend\n\nfunction sample_S_CGS(S::Matrix{Float64}, X::Matrix{Float64}, bgmm::BGMM)\n    D, N = size(X)\n    K = size(S, 1)\n    for n in randperm(N)\n        # remove\n        bgmm = remove_stats(bgmm, X[:,[n]], S[:,[n]])\n        # sample\n        S[:,n] = sample_Sn(X[:,n], bgmm)\n        # insert\n        bgmm = add_stats(bgmm, X[:,[n]], S[:,[n]])\n    end\n    return S, bgmm\nend\n\n####################\n## Algorithm main\n\"\"\"\nCompute posterior distributions via variational inference.\n\"\"\"\nfunction learn_VI(X::Matrix{Float64}, prior_bgmm::BGMM, max_iter::Int)\n    # initialisation\n    expt_S = init_S(X, prior_bgmm)\n    bgmm = add_stats(prior_bgmm, X, expt_S)\n    VB = NaN * zeros(max_iter)\n\n    # inference\n    for i in 1 : max_iter\n        # E-step\n        expt_S = exp.(update_S(bgmm, X))\n        # M-step\n        bgmm = add_stats(prior_bgmm, X, expt_S)\n        # calc VB\n        VB[i] = calc_ELBO(X, prior_bgmm, bgmm)\n    end\n\n    # assign binary values\n    S = winner_takes_all(expt_S)\n    return S, bgmm, VB\nend\n\n\"\"\"\nCompute posterior distributions via Gibbs sampling.\n\"\"\"\nfunction learn_GS(X::Matrix{Float64}, prior_bgmm::BGMM, max_iter::Int)\n    # initialisation\n    S = init_S(X, prior_bgmm)\n    bgmm = add_stats(prior_bgmm, X, S)\n    VB = NaN * zeros(max_iter)\n    \n    # inference\n    for i in 1 : max_iter            \n        # sample parameters\n        gmm = sample_GMM(bgmm)\n        # sample latent variables\n        S = sample_S_GS(gmm, X)\n        # update current model\n        bgmm = add_stats(prior_bgmm, X, S)\n        # calc VB\n        VB[i] = calc_ELBO(X, prior_bgmm, bgmm)\n    end\n\n    return S, bgmm, VB\nend\n\n\"\"\"\nCompute posterior distributions via collapsed Gibbs sampling.\n\"\"\"\nfunction learn_CGS(X::Matrix{Float64}, prior_bgmm::BGMM, max_iter::Int)\n    # initialisation\n    S = init_S(X, prior_bgmm)\n    bgmm = add_stats(prior_bgmm, X, S)\n    VB = NaN * zeros(max_iter)\n\n    # inference\n    for i in 1 : max_iter\n        # directly sample S\n        S, bgmm = sample_S_CGS(S, X, bgmm)\n        # calc VB\n        VB[i] = calc_ELBO(X, prior_bgmm, bgmm)\n    end\n\n    return S, bgmm, VB\nend\n\nend\n"
  },
  {
    "path": "src/LogisticRegression.jl",
    "content": "\"\"\"\nVariational inference for Bayesian logistic regression.\n\"\"\"\nmodule LogisticRegression\nusing Distributions\n\nexport sigmoid, sample_data, VI\n\nfunction sigmoid(x)\n    return 1.0 / (1.0 + exp.(-x[1]))\nend    \n\nfunction bern_sample(mu)\n    i = rand(Bernoulli(mu))\n    val = zeros(2)\n    val[i+1] = 1\n    return val\nend\n\n\"\"\"\nSample data & parameter given covariance Sigma_w and inputs X.\n\"\"\"\nfunction sample_data(X, Sigma_w)\n    N = size(X, 2)\n    M = size(Sigma_w, 1)\n\n    # sample parameters\n    W = rand(MvNormal(zeros(M), Sigma_w))\n    \n    # sample data\n    Y = [rand(Bernoulli(sigmoid(W'*X[:, n]))) for n in 1 : N]\n    return Y, W\nend\n\n\"\"\"\nCompute variational parameters.\n\"\"\"\nfunction VI(Y, X, M, Sigma_w, alpha, max_iter)\n    function rho2sig(rho)\n        return log.(1 + exp.(rho))\n    end\n    \n    function compute_df_dw(Y, X, Sigma_w, mu, rho, W)\n        M, N = size(X)\n        term1 = (mu - W) ./ rho2sig(rho).^2\n        term2 = inv(Sigma_w)*W\n        term3 = 0\n        for n in 1 : N\n            term3 += -(Y[n] - sigmoid(W'*X[:,n])) * X[:,n]\n        end\n        return term1 + term2 + term3\n    end\n    \n    function compute_df_dmu(mu, rho, W)\n        return (W - mu) ./ rho2sig(rho).^2\n    end\n    \n    function compute_df_drho(Y, X, Sigma_w, mu, rho, W)\n        return -0.5*((W - mu).^2 - rho2sig(rho).^2) .* compute_dprec_drho(rho)\n    end\n    \n    function compute_dprec_drho(rho)\n        return 2 * rho2sig(rho) .^ (-3) .* (1 ./ (1+exp.(rho))).^2 .* (1 ./ (1+exp.(-rho)))\n    end\n\n    # diag gaussian for approximate posterior\n    mu = randn(M)\n    rho = randn(M) # sigma = log.(1 + exp.(rho))\n    \n    for i in 1 : max_iter\n        # sample epsilon\n        ep = rand(M)\n        W_tmp = mu + log.(1 + exp.(rho)) .* ep\n\n        # calculate gradient\n        df_dw = compute_df_dw(Y, X, Sigma_w, mu, rho, W_tmp)\n        df_dmu = compute_df_dmu(mu, rho, W_tmp)\n        df_drho = compute_df_drho(Y, X, Sigma_w, mu, rho, W_tmp)\n        d_mu = df_dw + df_dmu\n        d_rho = df_dw .* (ep ./ (1+exp.(-rho))) + df_drho\n\n        # update variational parameters\n        mu = mu - alpha * d_mu\n        rho = rho - alpha * d_rho \n    end\n    return mu, rho\nend\n\nend\n"
  },
  {
    "path": "src/NMF.jl",
    "content": "\"\"\"\nVariational inference for Bayesian NMF\n\"\"\"\nmodule NMF\nusing Distributions\nusing StatsFuns.logsumexp, SpecialFunctions.digamma\n\nexport NMFModel\nexport sample_data, VI\n\n####################\n## Types\nstruct NMFModel\n    a_t::Array{Float64, 2} # D x K\n    b_t::Array{Float64, 2} # D x L\n    a_v::Float64 # 1 dim\n    b_v::Float64 # 1 dim\nend\n\nfunction sqsum(mat::Array, idx)\n    return squeeze(sum(mat, idx), idx)\nend\n\n####################\n## functions\nfunction init(X::Array{Int64, 2}, model::NMFModel)\n    D, N = size(X)\n    K = size(model.a_t, 2)\n    S = zeros(D, K, N)\n    A_t = rand(D, K)\n    B_t = rand(D, K)\n    A_v = rand(K, N)\n    B_v = rand(K, N)\n    for d in 1 : D\n        for k in 1 : K\n            for n in 1 : N\n                S[d,k,n] = X[d,n] * A_t[d,k] * B_t[d,k] * A_v[k,n] * B_v[k,n]\n            end\n        end\n    end   \n    return S, A_t, B_t, A_v, B_v\nend\n\nfunction update_S(X::Array{Int64, 2}, A_t::Array{Float64, 2}, B_t::Array{Float64, 2}, A_v::Array{Float64, 2}, B_v::Array{Float64, 2})\n    D, K = size(A_t)\n    N = size(A_v, 2)\n    S = zeros(D, K, N)\n    for d in 1 : D\n        for n in 1 : N\n            # K dim\n            ln_P = (digamma.(A_t[d,:]) + log.(B_t[d,:])\n                    + digamma.(A_v[:,n]) + log.(B_v[:,n])\n                    )\n            ln_P = ln_P - logsumexp(ln_P)\n            S[d,:,n] = X[d,n] * exp.(ln_P)\n        end\n    end\n    return S\nend\n\nfunction update_T(S::Array{Float64, 3}, A_v::Array{Float64, 2}, B_v::Array{Float64, 2}, model::NMFModel)\n    D, K, N = size(S)\n    a_t = model.a_t # DxK\n    b_t = model.b_t # DxK\n    A_t = a_t + sqsum(S, 3)\n    B_t = (a_t ./ b_t + repmat(sqsum(A_v.*B_v, 2)', D, 1)).^(-1)\n    return A_t, B_t\nend\n\nfunction update_V(S::Array{Float64, 3}, A_t::Array{Float64, 2}, B_t::Array{Float64, 2}, model::NMFModel)\n    a_v = model.a_v\n    b_v = model.b_v\n    D, K, N = size(S)\n    A_v = a_v + sqsum(S, 1)\n    B_v = (a_v / b_v + repmat(sqsum(A_t.*B_t, 1), 1, N)).^(-1)\n    return A_v, B_v\nend\n\n\"\"\"\nSample data given hyperparameters.\n\"\"\"\nfunction sample_data(N::Int, model::NMFModel)\n    # TODO; check b or 1/b ?\n    D, K = size(model.a_t)\n\n    T = zeros(D, K)\n    for d in 1 : D\n        for k in 1 : K\n            T[d,k] = rand(Gamma(model.a_t[d,k], 1.0/model.b_t[d,k])) # TODO: check\n        end\n    end\n\n    V = reshape(rand(Gamma(model.a_v, 1.0/model.b_v), K*N) , K, N) # TODO: check\n    \n    S = zeros(D, K, N)\n    for d in 1 : D\n        for k in 1 : K\n            for n in 1 : N\n                S[d,k,n] = T[d,k] * V[k,n]\n            end\n        end\n    end\n    #X = sqsum(S, 2) + 0.0 # zero noise\n    X = sqsum(S, 2)\n    return X, T, S, V\nend\n\nfunction update_model(A_t::Array{Float64, 2}, B_t::Array{Float64, 2}, model::NMFModel)\n    return NMFModel(A_t, B_t, model.a_v, model.b_v)\nend\n\n\"\"\"\nCompute variational posterior distributions.\n\"\"\"\nfunction VI(X::Array{Int64, 2}, model::NMFModel, max_iter::Int)\n    K = size(model.a_t, 2)\n    D, N = size(X)\n    S, A_t, B_t, A_v, B_v = init(X, model)\n    for iter in 1 : max_iter\n        # latent\n        S = update_S(X, A_t, B_t, A_v, B_v)\n        A_v, B_v = update_V(S, A_t, B_t, model)\n\n        # param\n        A_t, B_t = update_T(S, A_v, B_v, model)\n    end\n    \n    return update_model(A_t, B_t, model), S, A_t.*B_t, A_v.*B_v\nend\n\nend\n"
  },
  {
    "path": "src/PoissonHMM.jl",
    "content": "\"\"\"\nBayesian 1dim Poisson Hidden Markov Model\n\"\"\"\nmodule PoissonHMM\nusing StatsFuns.logsumexp, SpecialFunctions.digamma\nusing Distributions\n\nexport Gam, GHMM, Poi, HMM\nexport sample_HMM, sample_data, winner_takes_all\nexport learn_VI\n\n####################\n## Types\nstruct Gam\n    # Parameters of Gamma distribution\n    # 1dim\n    a::Float64\n    b::Float64\nend\n\nstruct BHMM\n    # Parameters of Bayesian Bernoulli Mixture Model \n    K::Int\n    alpha_phi::Vector{Float64}\n    alpha_A::Matrix{Float64}    \n    cmp::Vector{Gam}\nend\n\nstruct Poi\n    # Parameters of Poisson Distribution\n    # 1 dim\n    lambda::Float64\nend\n\nstruct HMM\n    # Parameters of Bernoulli Mixture Model\n    K::Int\n    phi::Vector{Float64}\n    A::Matrix{Float64}\n    cmp::Vector{Poi}\nend\n\n####################\n## Common functions\n\"\"\"\nSample an HMM from prior\n\"\"\"\nfunction sample_HMM(bhmm::BHMM)\n    cmp = Vector{Poi}()\n    for c in bhmm.cmp\n        lambda = rand(Gamma(c.a, 1.0/c.b))\n        push!(cmp, Poi(lambda))\n    end\n    phi = rand(Dirichlet(bhmm.alpha_phi))\n    A = zeros(size(bhmm.alpha_A))\n    for k in 1 : bhmm.K\n        A[:,k] = rand(Dirichlet(bhmm.alpha_A[:,k]))\n    end\n    return HMM(bhmm.K, phi, A, cmp)\nend\n\n\"\"\"\nSample data from a specific Poisson HMM\n\"\"\"\nfunction sample_data(hmm::HMM, N::Int)\n    X = zeros(N)\n    Z = zeros(hmm.K, N)\n\n    # sample (n=1)\n    Z[:,1] = categorical_sample(hmm.phi)\n    k = indmax(Z[:, 1])\n    X[1] = rand(Poisson(hmm.cmp[k].lambda))\n\n    # sample (n>1)\n    for n in 2 : N\n        Z[:,n] = categorical_sample(hmm.A[:,k])\n        k = indmax(Z[:, n])\n        X[n] = rand(Poisson(hmm.cmp[k].lambda))\n    end\n    return X, Z\nend\n\ncategorical_sample(p::Vector{Float64}) = categorical_sample(p, 1)[:,1]\nfunction categorical_sample(p::Vector{Float64}, N::Int)\n    K = length(p)\n    S = zeros(K, N)\n    S_tmp = rand(Categorical(p), N)\n    for k in 1 : K\n        S[k,find(S_tmp.==k)] = 1\n    end\n    return S\nend\n\nfunction init_Z(X::Vector{Float64}, bhmm::BHMM)\n    N = size(X, 1)\n    K = bhmm.K\n    \n    Z = rand(Dirichlet(ones(K)/K), N)\n    ZZ = [zeros(K,K) for _ in 1 : N - 1]\n    for n in 1 : N - 1\n        ZZ[n] = Z[:,n+1] * Z[:,n]'\n    end\n    \n    return Z, ZZ\nend\n\n\"\"\"\nNot implemented yet.\n\"\"\"\nfunction calc_ELBO(X::Matrix{Float64}, pri::BHMM, pos::BHMM)\nend\n\nfunction add_stats(bhmm::BHMM, X::Vector{Float64},\n                   Z::Matrix{Float64}, ZZ::Vector{Matrix{Float64}})\n    K = bhmm.K\n    sum_Z = sum(Z, 2)\n    alpha_phi = [bhmm.alpha_phi[k] + Z[k,1] for k in 1 : K]\n    alpha_A = bhmm.alpha_A + sum(ZZ)\n    cmp = Vector{Gam}()\n    \n    ZX = Z*X # (KxN) x (Nx1) = Kx1\n    for k in 1 : K\n        a = bhmm.cmp[k].a + ZX[k]\n        b = bhmm.cmp[k].b + sum_Z[k]\n        push!(cmp, Gam(a, b))\n    end\n    return BHMM(K, alpha_phi, alpha_A, cmp)\nend\n\nremove_stats(bhmm::BHMM, X::Vector{Float64}, Z::Matrix{Float64}) = add_stats(bhmm, X, -Z)\n\n####################\n## used for Variational Inference\nfunction update_Z(bhmm::BHMM, X::Vector{Float64}, Z::Matrix{Float64})\n    N = size(X, 1)\n    K = bhmm.K\n    ln_expt_Z = zeros(K, N)\n\n    ln_lkh = zeros(K, N)\n    for k in 1 : K\n        ln_lambda = digamma.(bhmm.cmp[k].a) - log.(bhmm.cmp[k].b)\n        lambda = bhmm.cmp[k].a / bhmm.cmp[k].b\n        for n in 1 : N\n            ln_lkh[k,n] = X[n]'*(ln_lambda) - lambda\n        end\n    end\n    \n    expt_ln_A = zeros(size(bhmm.alpha_A))\n    for k in 1 : 
K\n        expt_ln_A[:,k] = digamma.(bhmm.alpha_A[:,k]) - digamma.(sum(bhmm.alpha_A[:,k]))\n    end\n\n    # copy\n    ln_expt_Z = log.(Z)\n    \n    # n = 1\n    ln_expt_Z[:,1] = (digamma.(bhmm.alpha_phi) - digamma.(sum(bhmm.alpha_phi))\n                      + expt_ln_A' * exp.(ln_expt_Z[:,2])\n                      + ln_lkh[:,1]\n                      )\n    ln_expt_Z[:,1] = ln_expt_Z[:,1] - logsumexp(ln_expt_Z[:,1])\n    \n    # 2 <= n <= N - 1\n    for n in 2 : N - 1\n        ln_expt_Z[:,n] =( expt_ln_A * exp.(ln_expt_Z[:,n-1])\n                          + expt_ln_A' * exp.(ln_expt_Z[:,n+1])\n                          + ln_lkh[:,n]\n                          )\n        ln_expt_Z[:,n] = ln_expt_Z[:,n] - logsumexp(ln_expt_Z[:,n])\n    end\n    \n    # n = N\n    ln_expt_Z[:,N] =( expt_ln_A * exp.(ln_expt_Z[:,N-1])\n                      + ln_lkh[:,N]\n                      )\n    ln_expt_Z[:,N] = ln_expt_Z[:,N] - logsumexp(ln_expt_Z[:,N])\n    \n    # calc output\n    Z_ret = exp.(ln_expt_Z)\n    ZZ_ret = [zeros(K,K) for _ in 1 : N - 1]\n    for n in 1 : N - 1\n        ZZ_ret[n] = Z_ret[:,n+1] * Z_ret[:,n]'\n    end\n    return Z_ret, ZZ_ret\nend\n\n\"\"\"\nPick single states having a max probability.\n\"\"\"\nfunction winner_takes_all(Z::Matrix{Float64})\n    Z_ret = zeros(size(Z))\n    for n in 1 : size(Z_ret, 2)\n        idx = indmax(Z[:,n])\n        Z_ret[idx,n] = 1\n    end\n    return Z_ret\nend\n\nfunction logmatprod(ln_A::Array{Float64}, ln_B::Array{Float64})\n    I = size(ln_A, 1)\n    J = size(ln_B, 2)\n    ln_C = zeros(I, J)\n    for i in 1 : I\n        for j in 1 : J\n            ln_C[i, j] = logsumexp(ln_A[i, :] + ln_B[:, j])\n        end\n    end\n    return ln_C\nend\n\nfunction update_Z_fb(bhmm::BHMM, X::Vector{Float64})\n    K = bhmm.K\n    N = length(X)\n\n    # calc likelihood\n    ln_lik = zeros(K, N)\n    for k in 1 : K\n        ln_lambda = digamma.(bhmm.cmp[k].a) - log.(bhmm.cmp[k].b)\n        lambda = bhmm.cmp[k].a / bhmm.cmp[k].b\n        for n in 1 : N\n            ln_lik[k,n] =X[n]'*(ln_lambda) - lambda                \n        end\n    end\n    expt_ln_phi = digamma.(bhmm.alpha_phi) - digamma.(sum(bhmm.alpha_phi))\n    expt_ln_A = zeros(K,K)\n    for k in 1 : K\n        expt_ln_A[:,k] = digamma.(bhmm.alpha_A[:,k]) - digamma.(sum(bhmm.alpha_A[:,k]))\n    end\n\n    Z, ZZ = fb_alg(ln_lik, expt_ln_phi, expt_ln_A)\n    \n    # different notation\n    ZZ_ret = [ZZ[:,:,n] for n in 1:size(ZZ, 3)]\n\n    return Z, ZZ_ret\nend\n\nfunction fb_alg(ln_lik::Matrix{Float64}, ln_phi::Vector{Float64}, ln_A::Matrix{Float64})\n    K, T = size(ln_lik)\n    ln_Z = zeros(K, T)\n    ln_ZZ = zeros(K, K, T)\n    ln_alpha = zeros(K, T)\n    ln_beta = zeros(K, T)\n    ln_st = zeros(T)\n    \n    for t in 1 : T\n        if t == 1\n            ln_alpha[:, 1] = ln_phi + ln_lik[:, 1]\n        else\n            ln_alpha[:, t] = logmatprod(ln_A, ln_alpha[:, t-1]) + ln_lik[:, t]\n        end\n        ln_st[t] = logsumexp(ln_alpha[:, t])\n        ln_alpha[:,t] = ln_alpha[:,t] - ln_st[t]\n    end\n    \n    for t in T-1 : -1 : 1\n        ln_beta[:, t] = logmatprod(ln_A', ln_beta[:, t+1] + ln_lik[:,t+1])\n        ln_beta[:, t] = ln_beta[:, t] - ln_st[t+1]\n    end\n    \n    ln_Z = ln_alpha + ln_beta\n    for t in 1 : T\n        if t < T\n            ln_ZZ[:,:,t] = (repmat(ln_alpha[:, t]', K, 1) + ln_A\n            + repmat(ln_lik[:, t+1] + ln_beta[:,t+1], 1, K))\n            ln_ZZ[:,:,t] = ln_ZZ[:,:,t] - ln_st[t+1]\n        end\n    end\n    return exp.(ln_Z), 
exp.(ln_ZZ)\nend\n\n\"\"\"\nCompute approximate posterior distributions via variational inference.\n\"\"\"\nfunction learn_VI(X::Vector{Float64}, prior_bhmm::BHMM, max_iter::Int)\n    # initialisation\n    expt_Z, expt_ZZ = init_Z(X, prior_bhmm)\n    bhmm = add_stats(prior_bhmm, X, expt_Z, expt_ZZ)\n    VB = NaN * zeros(max_iter)\n\n    # inference\n    for i in 1 : max_iter\n        # E-step\n        #expt_Z, expt_ZZ = update_Z(bhmm, X, expt_Z)\n        expt_Z, expt_ZZ = update_Z_fb(bhmm, X)\n        # M-step\n        bhmm = add_stats(prior_bhmm, X, expt_Z, expt_ZZ)\n    end\n\n    return expt_Z, bhmm\nend\nend\n"
  },
  {
    "path": "src/PoissonMixtureModel.jl",
    "content": "\"\"\"\nBayesian Poisson Mixture Model\n\"\"\"\nmodule PoissonMixtureModel\nusing StatsFuns.logsumexp, SpecialFunctions.digamma\nusing Distributions\n\nexport Gam, BPMM, Poi, PMM\nexport sample_PMM, sample_data, winner_takes_all\nexport learn_GS, learn_CGS, learn_VI\n\n####################\n## Types\nstruct Gam\n    # Parameters of Gamma distribution\n    a::Vector{Float64}\n    b::Float64\nend\n\nstruct BPMM\n    # Parameters of Bayesian Poisson Mixture Model \n    D::Int\n    K::Int\n    alpha::Vector{Float64}\n    cmp::Vector{Gam}\nend\n\nstruct Poi\n    # Parameters of Poisson Distribution\n    lambda::Vector{Float64}\nend\n\nstruct PMM\n    # Parameters of Poisson Mixture Model\n    D::Int\n    K::Int\n    phi::Vector{Float64}\n    cmp::Vector{Poi}\nend\n\n####################\n## Common functions\n\"\"\"\nSample a PMM given hyperparameters.\n\"\"\"\nfunction sample_PMM(bpmm::BPMM)\n    cmp = Vector{Poi}()\n    for c in bpmm.cmp\n        lambda = Vector{Float64}()\n        for d in 1 : bpmm.D\n            push!(lambda, rand(Gamma(c.a[d], 1.0/c.b)))\n        end\n        push!(cmp, Poi(lambda))  \n    end\n    phi = rand(Dirichlet(bpmm.alpha))\n    return PMM(bpmm.D, bpmm.K, phi, cmp)\nend\n\n\"\"\"\nSample data from a specific PMM model.\n\"\"\"\nfunction sample_data(pmm::PMM, N::Int)\n    X = zeros(pmm.D, N)\n    S = categorical_sample(pmm.phi, N)\n    for n in 1 : N\n        k = indmax(S[:, n])\n        for d in 1 : pmm.D\n            X[d,n] = rand(Poisson(pmm.cmp[k].lambda[d]))\n        end\n    end\n    return X, S\nend\n\ncategorical_sample(p::Vector{Float64}) = categorical_sample(p, 1)[:,1]\nfunction categorical_sample(p::Vector{Float64}, N::Int)\n    K = length(p)\n    S = zeros(K, N)\n    S_tmp = rand(Categorical(p), N)\n    for k in 1 : K\n        S[k,find(S_tmp.==k)] = 1\n    end\n    return S\nend\n\nfunction init_S(X::Matrix{Float64}, bpmm::BPMM)\n    N = size(X, 2)\n    K = bpmm.K\n    S = categorical_sample(ones(K)/K, N)    \n    return S\nend\n\nfunction calc_ELBO(X::Matrix{Float64}, pri::BPMM, pos::BPMM)\n    ln_expt_S = update_S(pos, X)\n    expt_S = exp.(ln_expt_S)\n    K, N = size(expt_S)\n    D = size(X, 1)\n\n    expt_ln_lambda = zeros(D, K)\n    expt_lambda = zeros(D, K)\n    expt_ln_lkh = 0\n    for k in 1 : K\n        expt_ln_lambda[:,k] = digamma.(pos.cmp[k].a) - log.(pos.cmp[k].b)\n        expt_lambda[:,k] = pos.cmp[k].a / pos.cmp[k].b\n        for n in 1 : N\n            expt_ln_lkh += expt_S[k,n] * (X[:, n]' * expt_ln_lambda[:,k]\n                                       - sum(expt_lambda[:,k]) - sum(lgamma.(X[:,n]+1)))[1]\n        end\n    end\n    \n    expt_ln_pS = sum(expt_S' * (digamma.(pos.alpha) - digamma.(sum(pos.alpha))))\n    expt_ln_qS = sum(expt_S .* ln_expt_S)\n    \n    KL_lambda = 0\n    for k in 1 : K\n        KL_lambda += (sum(pos.cmp[k].a)*log.(pos.cmp[k].b) - sum(pri.cmp[k].a)*log.(pri.cmp[k].b)\n                      - sum(lgamma.(pos.cmp[k].a)) + sum(lgamma.(pri.cmp[k].a))\n                      + (pos.cmp[k].a - pri.cmp[k].a)' * expt_ln_lambda[:,k]\n                      + (pri.cmp[k].b - pos.cmp[k].b) * sum(expt_lambda[:,k])\n                      )[1]\n    end\n    KL_pi = (lgamma.(sum(pos.alpha)) - lgamma.(sum(pri.alpha))\n             - sum(lgamma.(pos.alpha)) + sum(lgamma.(pri.alpha))\n             + (pos.alpha - pri.alpha)' * (digamma.(pos.alpha) - digamma.(sum(pos.alpha)))\n             )[1]\n    \n    VB = expt_ln_lkh + expt_ln_pS - expt_ln_qS - (KL_lambda + KL_pi)\n    return VB\nend\n\nfunction 
add_stats(bpmm::BPMM, X::Matrix{Float64}, S::Matrix{Float64})\n    D = bpmm.D\n    K = bpmm.K\n    sum_S = sum(S, 2)\n    alpha = [bpmm.alpha[k] + sum_S[k] for k in 1 : K]\n    cmp = Vector{Gam}()\n\n    XS = X*S';\n    for k in 1 : K\n        a = [(bpmm.cmp[k].a[d] + XS[d,k])::Float64 for d in 1 : D]\n        b = bpmm.cmp[k].b + sum_S[k]\n        push!(cmp, Gam(a, b))\n    end\n    return BPMM(D, K, alpha, cmp)\nend\n\nremove_stats(bpmm::BPMM, X::Matrix{Float64}, S::Matrix{Float64}) = add_stats(bpmm, X, -S)\n\n####################\n## used for Variational Inference\nfunction update_S(bpmm::BPMM, X::Matrix{Float64})\n    D, N = size(X)\n    K = bpmm.K\n    ln_expt_S = zeros(K, N)\n    tmp = zeros(K)\n\n    sum_digamma_tmp = digamma.(sum(bpmm.alpha))\n    for k in 1 : K\n        tmp[k] = - sum(bpmm.cmp[k].a) / bpmm.cmp[k].b\n        tmp[k] += digamma.(bpmm.alpha[k]) - sum_digamma_tmp\n    end\n    ln_lambda_X = [X'*(digamma.(bpmm.cmp[k].a) - log.(bpmm.cmp[k].b)) for k in 1 : K]\n    for n in 1 : N\n        tmp_ln_pi =  [tmp[k] + ln_lambda_X[k][n] for k in 1 : K]\n        ln_expt_S[:,n] = tmp_ln_pi - logsumexp(tmp_ln_pi)\n    end\n    return ln_expt_S\nend\n\n\"\"\"\nPick single states having a max probability.\n\"\"\"\nfunction winner_takes_all(S::Matrix{Float64})\n    S_ret = zeros(size(S))\n    for n in 1 : size(S_ret, 2)\n        idx = indmax(S[:,n])\n        S_ret[idx,n] = 1\n    end\n    return S_ret\nend\n\n####################\n## used for Gibbs Sampling\nfunction sample_S_GS(pmm::PMM, X::Matrix{Float64})\n    D, N = size(X)\n    K = pmm.K\n    S = zeros(K, N)\n\n    tmp = [-sum(pmm.cmp[k].lambda) + log.(pmm.phi[k]) for k in 1 : K]\n    ln_lambda_X = [X'*log.(pmm.cmp[k].lambda) for k in 1 : K]\n    for n in 1 : N\n        tmp_ln_phi = [(tmp[k] + ln_lambda_X[k][n])::Float64 for k in 1 : K]\n        tmp_ln_phi = tmp_ln_phi - logsumexp(tmp_ln_phi)\n        S[:,n] = categorical_sample(exp.(tmp_ln_phi))\n    end\n    return S\nend\n\n####################\n## used for Collapsed Gibbs Sampling\nfunction calc_ln_NB(Xn::Vector{Float64}, gam::Gam)\n    ln_lkh = [(gam.a[d]*log.(gam.b)\n               - lgamma.(gam.a[d])\n               + lgamma.(Xn[d] + gam.a[d])\n               - (Xn[d] + gam.a[d])*log.(gam.b + 1)\n               )::Float64 for d in 1 : size(Xn, 1)]\n    return sum(ln_lkh)\nend\n\nfunction sample_Sn(Xn::Vector{Float64}, bpmm::BPMM)\n    ln_tmp = [(calc_ln_NB(Xn, bpmm.cmp[k]) + log.(bpmm.alpha[k])) for k in 1 : bpmm.K]\n    ln_tmp = ln_tmp -  logsumexp(ln_tmp)\n    Sn = categorical_sample(exp.(ln_tmp))\n    return Sn\nend\n\nfunction sample_S_CGS(S::Matrix{Float64}, X::Matrix{Float64}, bpmm::BPMM)\n    D, N = size(X)\n    K = size(S, 1)\n    for n in randperm(N)\n        # remove\n        bpmm = remove_stats(bpmm, X[:,[n]], S[:,[n]])\n        # sample\n        S[:,n] = sample_Sn(X[:,n], bpmm)\n        # insert\n        bpmm = add_stats(bpmm, X[:,[n]], S[:,[n]])\n    end\n    return S, bpmm\nend\n\n####################\n## Algorithm main\n\"\"\"\nCompute posterior distribution via variational inference.\n\"\"\"\nfunction learn_VI(X::Matrix{Float64}, prior_bpmm::BPMM, max_iter::Int)\n    # initialisation\n    expt_S = init_S(X, prior_bpmm)\n    bpmm = add_stats(prior_bpmm, X, expt_S)\n    VB = NaN * zeros(max_iter)\n\n    # inference\n    for i in 1 : max_iter\n        # E-step\n        expt_S = exp.(update_S(bpmm, X))\n        # M-step\n        bpmm = add_stats(prior_bpmm, X, expt_S)\n        # calc VB\n        VB[i] = calc_ELBO(X, prior_bpmm, bpmm)\n    end\n\n    return 
expt_S, bpmm, VB\nend\n\n\"\"\"\nCompute posterior distribution via Gibbs sampling.\n\"\"\"\nfunction learn_GS(X::Matrix{Float64}, prior_bpmm::BPMM, max_iter::Int)\n    # initialisation\n    S = init_S(X, prior_bpmm)\n    bpmm = add_stats(prior_bpmm, X, S)\n    VB = NaN * zeros(max_iter)\n    \n    # inference\n    for i in 1 : max_iter            \n        # sample parameters\n        pmm = sample_PMM(bpmm)\n        # sample latent variables\n        S = sample_S_GS(pmm, X)\n        # update current model\n        bpmm = add_stats(prior_bpmm, X, S)\n        # calc VB\n        VB[i] = calc_ELBO(X, prior_bpmm, bpmm)\n    end\n\n    return S, bpmm, VB\nend\n\n\"\"\"\nCompute posterior distribution via collapsed Gibbs sampling.\n\"\"\"\nfunction learn_CGS(X::Matrix{Float64}, prior_bpmm::BPMM, max_iter::Int)\n    # initialisation\n    S = init_S(X, prior_bpmm)\n    bpmm = add_stats(prior_bpmm, X, S)\n    VB = NaN * zeros(max_iter)\n\n    # inference\n    for i in 1 : max_iter\n        # directly sample S\n        S, bpmm = sample_S_CGS(S, X, bpmm)\n        # calc VB\n        VB[i] = calc_ELBO(X, prior_bpmm, bpmm)\n    end\n\n    return S, bpmm, VB\nend\n\nend\n"
  },
  {
    "path": "src/demo_BayesNeuralNet.jl",
    "content": "####################################\n## Demo script for Bayesian neural network.\n\nusing PyPlot, PyCall\n\npush!(LOAD_PATH, \".\")\nimport BayesNeuralNet\n\n\"\"\"\nSample neural nets from prior.\n\"\"\"\nfunction sample_test()\n    # model parameters\n    D = 1 # output\n    K = 3 # hidden\n    M = 2 # input\n    sigma2_w = 10.0\n    sigma2_y = 0.1\n    \n    xmin = -5\n    xmax = 5\n    N_lin = 1000\n    X_lin = ones(M, N_lin)\n    X_lin[1,:] = linspace(xmin, xmax, N_lin)\n    X_lin[2,:] = 1 # bias\n\n    # visualize\n    num_samples = 5\n    figure(\"Function samples\")\n    clf()\n    for i in 1 : num_samples\n        _, Y_true, _, _ = BayesNeuralNet.sample_data_from_prior(X_lin, sigma2_w, sigma2_y, D, K)\n        plot(X_lin[1,:], Y_true)\n        xlim([xmin, xmax])\n    end\n    ratey = (ylim()[2] - ylim()[1]) * 0.1\n    ratex = (xlim()[2] - xlim()[1]) * 0.1\n    text(xlim()[1] + ratex, ylim()[2] - ratey, @sprintf(\"K=%d\", K), fontsize=18)\n    show()\nend\n\n\"\"\"\nRun a test script of variational inference for Bayesian neural net.\n\"\"\"\nfunction test()\n    #################\n    # prepara data\n    \n    # data size\n    D = 1 # output\n    M = 2 # input\n        \n    # function setting\n    xmin = -2\n    xmax = 4\n    N_lin = 1000\n    X_lin = ones(M, N_lin)\n    X_lin[1,:] = linspace(xmin, xmax, N_lin)\n    X_lin[2,:] = 1 # bias\n\n    # training data\n    N = 50 # data size\n    X = 2*rand(M, N) - 0.0 # input\n    X[2,:] = 1.0 # bias\n    Y = 0.5*sin.(2*pi * X[1,:]/3) + 0.05 * randn(N)    \n    \n    # model parameters\n    K = 5\n    sigma2_w = 10.0\n    sigma2_y = 0.01\n    \n    ################\n    # inference\n    alpha = 1.0e-5\n    max_iter = 100000\n    mu1, rho1, mu2, rho2 = BayesNeuralNet.VI(Y, X, sigma2_w, sigma2_y, K, alpha, max_iter)\n    Y_mean = [mu2'* tanh.(mu1'X_lin[:,n]) for n in 1 : N_lin]\n\n    ################\n    # visualize        \n    figure(\"result\")\n    clf()\n    Y_list = []\n    num_samples = 100\n    for i in 1 : num_samples\n        Y_est, _ = BayesNeuralNet.sample_data_from_posterior(X_lin, mu1, rho1, mu2, rho2, sigma2_y, D)\n        push!(Y_list, Y_est)\n        plot(X_lin[1,:], Y_est, \"-c\", alpha=0.25)\n    end\n    plot(X[1,:], Y, \"ok\")\n    plot(X_lin[1,:], Y_mean, \"b-\")\n    xlim([xmin, xmax])\n    xlabel(\"x\")\n    ylabel(\"y\")\n    show()\nend\n\n#sample_test()\ntest()\n"
  },
  {
    "path": "src/demo_DimensionalityReduction.jl",
    "content": "###################################\n## Demo script for Bayesian Dimensionality Reduction\n\nusing PyPlot, PyCall\n@pyimport sklearn.datasets as datasets\n\npush!(LOAD_PATH,\".\")\nimport DimensionalityReduction\n\nfunction load_facedata(skip::Int)\n    face = datasets.fetch_olivetti_faces()\n    Y_raw = face[\"images\"]\n    N, S_raw, _ = size(Y_raw)\n\n    L = round(Int, S_raw / skip)\n    Y_tmp = Y_raw[:,1:skip:end, 1:skip:end]\n    Y = convert(Array{Float64, 2}, reshape(Y_tmp, N, size(Y_tmp,2)*size(Y_tmp,3))')\n    D = size(Y, 1)\n    \n    return Y, D, L\nend\n\nfunction visualize(Y::Array{Float64,2}, L::Int)\n    D, N = size(Y)\n    base = round(Int, sqrt(N))\n    v = round(Int, (L*ceil(N / base)))\n    h = L * base\n    pic = zeros(v, h)\n\n    for n in 1 : N\n        i = round(Int, (L*ceil(n / base)))\n        idx1 = i - L + 1 : i\n        idx2 = L*mod(n-1, base)+1 : L*(mod(n-1, base) + 1)\n        pic[idx1,idx2] = reshape(Y[:,n], L, L)\n    end\n    imshow(pic, cmap=ColorMap(\"gray\"))\nend\n\nfunction visualize(Y::Array{Float64,2}, L::Int, mask::BitArray{2})\n    # for missing\n    D, N = size(Y)\n    base = round(Int, sqrt(N))\n    v = round(Int, (L*ceil(N / base)))\n    h = L * base\n    pic = zeros(v, h, 3)\n\n    Y_3dim = zeros(D, N, 3)\n    for i in 1 : 3\n        if i == 2\n            Y_tmp = deepcopy(Y)\n            Y_tmp[mask] = 1\n            Y_3dim[:,:,i] = Y_tmp\n        else\n            Y_tmp = deepcopy(Y)\n            Y_tmp[mask] = 0\n            Y_3dim[:,:,i] = Y_tmp\n        end\n    end\n    \n    for n in 1 : N\n        i = round(Int, (L*ceil(n / base)))\n        idx1 = i - L + 1 : i\n        idx2 = L*mod(n-1, base)+1 : L*(mod(n-1, base) + 1)\n        for i in 1 : 3\n            pic[idx1,idx2,i] = reshape(Y_3dim[:,n,i], L, L)\n        end\n    end\n    imshow(pic, cmap=ColorMap(\"gray\"))\nend\n\n\"\"\"\nRun a demo script of missing data interpolation for face dataset.\n\"\"\"\nfunction test_face_missing()\n    # load data\n    skip = 2\n    Y, D, L = load_facedata(skip)\n\n    # mask\n    missing_rate = 0.50\n    mask = rand(size(Y)) .< missing_rate\n    Y_obs = deepcopy(Y)\n    Y_obs[mask] = NaN\n    \n    # known parames\n    M = 16\n    sigma2_y = 0.001\n    Sigma_W = zeros(M,M,D)\n    Sigma_mu = 1.0 * eye(D)\n    for d in 1 : D\n        Sigma_W[:,:,d] = 0.1 * eye(M)\n    end\n    prior = DimensionalityReduction.DRModel(D, M, sigma2_y, zeros(M, D), Sigma_W, zeros(D), Sigma_mu)\n\n    # learn & generate\n    max_iter = 100\n    posterior, X_est = DimensionalityReduction.VI(deepcopy(Y_obs), prior, max_iter)\n    Y_est = posterior.m_W'*X_est + repmat(posterior.m_mu, 1, size(X_est, 2))\n    Y_itp = deepcopy(Y_obs)\n    Y_itp[mask] = Y_est[mask]\n  \n    #visualize\n    N_show = 4^2\n    \n    figure(\"Observation\")\n    clf()\n    visualize(Y_obs[:,1:N_show], L, mask[:,1:N_show])\n    title(\"Observation\")\n\n    #figure(\"Estimation\")\n    #clf()\n    #visualize(Y_est[:,1:N_show], L)\n    #title(\"Estimation\")\n\n    figure(\"Interpolation\")\n    clf()\n    visualize(Y_itp[:,1:N_show], L)\n    title(\"Interpolation\")\n\n    figure(\"Truth\")\n    clf()\n    visualize(Y[:,1:N_show], L)\n    title(\"Truth\")\n    show()\nend\n\n\"\"\"\nRun a dimensionality reduction demo using Iris dataset.\n\"\"\"\nfunction test_iris()\n    ##################\n    # load data\n    iris = datasets.load_iris()\n    Y_obs = iris[\"data\"]'\n    label_list = [iris[\"target_names\"][elem+1] for elem in iris[\"target\"]]\n    D, N = size(Y_obs)\n\n    
##################\n    # 2D compression    \n\n    # model\n    M = 2\n    sigma2_y = 0.001\n    Sigma_W = zeros(M,M,D)\n    Sigma_mu = 1.0 * eye(D)\n    for d in 1 : D\n        Sigma_W[:,:,d] = 0.1 * eye(M)\n    end\n    prior = DimensionalityReduction.DRModel(D, M, sigma2_y, zeros(M, D), Sigma_W, zeros(D), Sigma_mu)\n        \n    # learn & generate\n    max_iter = 100\n    posterior, X_est = DimensionalityReduction.VI(deepcopy(Y_obs), prior, max_iter)\n\n    # visualize\n    figure(\"2D plot\")\n    clf()\n    scatter(X_est[1,1:50], X_est[2,1:50], color=\"r\")\n    scatter(X_est[1,51:100], X_est[2,51:100], color=\"g\")\n    scatter(X_est[1,101:end], X_est[2,101:end], color=\"b\")\n    xlabel(\"\\$x_1\\$\", fontsize=20)\n    ylabel(\"\\$x_2\\$\", fontsize=20)\n    legend([label_list[1], label_list[51], label_list[101]], fontsize=16)\n\n    ##################\n    # 3D compression\n\n    # model\n    M = 3\n    sigma2_y = 0.001\n    Sigma_W = zeros(M,M,D)\n    Sigma_mu = 1.0 * eye(D)\n    for d in 1 : D\n        Sigma_W[:,:,d] = 0.1 * eye(M)\n    end\n    prior = DimensionalityReduction.DRModel(D, M, sigma2_y, zeros(M, D), Sigma_W, zeros(D), Sigma_mu)\n        \n    # learn & generate\n    max_iter = 100\n    posterior, X_est = DimensionalityReduction.VI(deepcopy(Y_obs), prior, max_iter)\n\n    # visualize\n    figure(\"3D plot\")\n    clf()\n    scatter3D(X_est[1,1:50], X_est[2,1:50], X_est[3,1:50], c=\"r\")\n    scatter3D(X_est[1,51:100], X_est[2,51:100], X_est[3,51:100], c=\"g\")\n    scatter3D(X_est[1,101:end], X_est[2,101:end], X_est[3,101:end], c=\"b\")\n    legend([label_list[1], label_list[51], label_list[101]], fontsize=16)\n    xlabel(\"\\$x_1\\$\", fontsize=20)\n    ylabel(\"\\$x_2\\$\", fontsize=20)\n    zlabel(\"\\$x_3\\$\", fontsize=20)\n    show()\nend\n\n#test_face_missing()\ntest_iris()\n"
  },
  {
    "path": "src/demo_GaussianMixtureModel.jl",
    "content": "###################################\n## Example code\n## for Bayesian Gaussin Mixture Model\n\nusing PyPlot, PyCall\npush!(LOAD_PATH,\".\")\nimport GaussianMixtureModel\n\n\"\"\"\nVisualize data & estimation in 2D space.\n\"\"\"\nfunction visualize_2D(X::Matrix{Float64}, S::Matrix{Float64}, S_est::Matrix{Float64}, text)\n    cmp = get_cmap(\"jet\")\n\n    K1 = size(S, 1)\n    K2 = size(S_est, 1)\n    col1 = [pycall(cmp.o, PyAny, Int(round(val)))[1:3] for val in linspace(0,255,K1)]    \n    col2 = [pycall(cmp.o, PyAny, Int(round(val)))[1:3] for val in linspace(0,255,K2)]    \n\n    f, (ax1, ax2) = subplots(1,2,num=text)\n    f[:clf]()\n    f, (ax1, ax2) = subplots(1,2,num=text)\n\n    for k in 1 : K1\n        ax1[:scatter](X[1, S[k,:].==1], X[2, S[k,:].==1], color=col1[k])\n    end\n    ax1[:set_title](\"truth\")\n    \n    for k in 1 : K2\n        ax2[:scatter](X[1, S_est[k,:].==1], X[2, S_est[k,:].==1], color=col2[k])\n    end\n\n    ax2[:set_title](\"estimation\")\nend\n\n\"\"\"\nRun a test script for 2D data clustering.\n\"\"\"\nfunction test_2D()\n    ## set model\n    D = 2 # data dimension\n    K = 4 #  number of mixture components\n    alpha = 100.0 * ones(K)\n    beta = 0.1\n    m = zeros(D)\n    nu = D + 1.0\n    W = eye(D)\n    cmp = [GaussianMixtureModel.GW(beta, m, nu, W) for _ in 1 : K]\n    bgmm = GaussianMixtureModel.BGMM(D, K, alpha, cmp)\n    \n    ## generate data\n    N = 300\n    gmm = GaussianMixtureModel.sample_GMM(bgmm)\n    X, S = GaussianMixtureModel.sample_data(gmm, N)\n    \n    ## inference\n    max_iter = 100\n    tic()\n    S_est, post_bgmm, VB = GaussianMixtureModel.learn_VI(X, bgmm, max_iter)\n    #S_est, post_bgmm, VB = GaussianMixtureModel.learn_GS(X, bgmm, max_iter)\n    #S_est, post_bgmm, VB = GaussianMixtureModel.learn_CGS(X, bgmm, max_iter)\n    toc()\n\n    ## plot\n    visualize_2D(X, S, GaussianMixtureModel.winner_takes_all(S_est), \"2D plot\")\n\n    # VB check\n    figure(\"ELBO\")\n    clf()\n    plot(VB)\n    ylabel(\"ELBO\")\n    xlabel(\"iterations\")\n    show()\nend\n\ntest_2D()\n"
  },
  {
    "path": "src/demo_LogisticRegression.jl",
    "content": "#####################################\n## Bayesian logistic regression demo\n\nusing PyPlot, PyCall\nusing Distributions\npush!(LOAD_PATH, \".\")\nimport LogisticRegression\n\n\"\"\"\nVisualize prediction via surface (only for 2D inputs.)\n\"\"\"\nfunction visualize_surface(mu, rho, X, Y, text)\n    N = 100\n    R = 100\n    xmin = minimum(X[1,:])\n    xmax = maximum(X[1,:])\n    ymin = minimum(X[2,:])\n    ymax = maximum(X[2,:])\n    lx = xmax - xmin\n    ly = ymax - ymin\n    xmin = xmin - 0.25 * lx\n    xmax = xmax + 0.25 * lx\n    ymin = ymin - 0.25 * ly\n    ymax = ymax + 0.25 * ly\n    \n    x1 = linspace(xmin,xmax,R)\n    x2 = linspace(ymin,ymax,R)\n    x1grid = repmat(x1, 1, R)\n    x2grid = repmat(x2', R, 1)\n    val = [x1grid[:] x2grid[:]]'\n\n    z_list = []\n    sigma = log.(1 + exp.(rho))\n    for n in 1 : N\n        W = rand(MvNormal(mu, diagm(sigma.^2)))\n        z_tmp = [LogisticRegression.sigmoid(W'*val[:,i]) for i in 1 : size(val, 2)]\n        push!(z_list, z_tmp)\n    end\n    z = mean(z_list)\n    zgrid = reshape(z, R, R)\n\n    # 3D plot\n    figure(\"surface\")\n    clf()\n    plot_surface(x1grid, x2grid, zgrid, alpha=0.5)\n    scatter3D(X[1,Y.==1], X[2,Y.==1], Y[Y.==1]+0.01, c=\"r\", depthshade=true)\n    scatter3D(X[1,Y.==0], X[2,Y.==0], Y[Y.==0], c=\"b\", depthshade=true)\n    xlim([xmin, xmax])\n    ylim([ymin, ymax])\n    zlim([0, 1])\n    title(text)\nend\n\n\"\"\"\nVisualize prediction via contour (only for 2D inputs.)\n\"\"\"\nfunction visualize_contour(mu, rho, X, Y)\n    N = 100\n    R = 100\n    xmin = 2*minimum(X[1,:])\n    xmax = 2*maximum(X[1,:])\n    ymin = minimum(X[2,:])\n    ymax = maximum(X[2,:])\n\n    x1 = linspace(xmin,xmax,R)\n    x2 = linspace(ymin,ymax,R)\n    x1grid = repmat(x1, 1, R)\n    x2grid = repmat(x2', R, 1)\n    val = [x1grid[:] x2grid[:]]'\n\n    z_list = []\n    W_list = []\n    sigma = log.(1 + exp.(rho))\n    for n in 1 : N\n        W = rand(MvNormal(mu, diagm(sigma.^2)))\n        z_tmp = [LogisticRegression.sigmoid(W'*val[:,i]) for i in 1 : size(val, 2)]\n        push!(W_list, W)\n        push!(z_list, z_tmp)\n    end\n    z = mean(z_list)\n    zgrid = reshape(z, R, R)\n\n    # precition\n    figure(\"contour\")\n    clf()\n    contour(x1grid, x2grid, zgrid, alpha=0.5, cmap=get_cmap(\"bwr\"))\n    scatter(X[1,Y.==1], X[2,Y.==1], c=\"r\")\n    scatter(X[1,Y.==0], X[2,Y.==0], c=\"b\")\n    xlim([xmin, xmax])\n    ylim([ymin, ymax])\n    title(\"prediction\")\n\n    # parameter samples\n    figure(\"samples\")\n    clf()\n    for n in 1 : 10\n        draw_line(W_list[n], xmin, xmax)\n    end\n    scatter(X[1,Y.==1]', X[2,Y.==1]', c=\"r\")\n    scatter(X[1,Y.==0]', X[2,Y.==0]', c=\"b\")\n    xlim([xmin, xmax])\n    ylim([ymin, ymax])\n    title(\"parameter samples\")\nend\n\nfunction draw_line(W, xmin, xmax)\n    y1 = - xmin*W[1]/W[2]\n    y2 = - xmax*W[1]/W[2]\n    plot([xmin, xmax], [y1, y2], c=\"k\")\nend\n\n\n########################\n# create model\n\nM = 2 # input dimension\nSigma_w = 100.0 * eye(M) # prior on W\n\n########################\n# create toy-data using prior model\n\nN = 50 # num of data points\nX = 2 * rand(M, N) - 1.0 # input values\n\n# sample observation Y\nY, _ = LogisticRegression.sample_data(X, Sigma_w)\n\n########################\n# inference\nalpha = 1.0e-4 # learning rate\nmax_iter = 100000 # VI maximum iterations \n\n# learn variational parameters (mu & rho)\nmu, rho = LogisticRegression.VI(Y, X, M, Sigma_w, alpha, max_iter)\n\n########################\n# visualize (only for 
M=2)\nvisualize_surface(mu, rho, X, Y, \"prediction\")\nvisualize_contour(mu, rho, X, Y)\nshow()\n"
  },
  {
    "path": "src/demo_NMF.jl",
    "content": "##############################\n## Audio decomposition demo using NMF\n\nusing PyPlot, PyCall\nusing DataFrames\nusing Distributions\n\npush!(LOAD_PATH, \".\")\nimport NMF\n@pyimport scipy.io.wavfile as wf\n\n\n# load data\nwavfile = \"../data/organ.wav\"\nfs, data = wf.read(wavfile)\n\nfigure(\"data\")\nclf()\nPxx, freqs, t, pl = specgram(data[10000:318000,2], Fs=fs, NFFT=256, noverlap=0)\nxlabel(\"time [sec]\")\nylabel(\"frequency [Hz]\")\nylim([0,22000])\n\n# model\nD, N = size(Pxx)\nK = 2\na_t = 1.0\nb_t = 1.0\na_v = 1.0\nb_v = 100.0\nprior = NMF.NMFModel(a_t*ones(D,K), b_t*ones(D, K), a_v, b_v)\n\n# inference\nmax_iter = 100\nposterior, S_est, T_est, V_est = NMF.VI(Int64.(round.(Pxx)), prior, max_iter)\nX = T_est * V_est\n\n# visualize\nfigure(\"T\")\nclf()\nfor k in 1 : K\n    subplot(K,1,k)\n    plot(T_est[:,k], linewidth=1.0)\n    xlim([0, D])\n    ylim([0, ylim()[2]])\nend\n\nfigure(\"V\")\nclf()\nfor k in 1 : K\n    subplot(K,1,k)\n    plot(V_est[k,:], linewidth=1.0)\n    xlim([0,N])\n    ylim([0, ylim()[2]])\nend\nshow()\n"
  },
  {
    "path": "src/demo_PoissonHMM.jl",
    "content": "###################################\n## Example code\n## for Bayesian Poisson HMM\n\nusing PyPlot, PyCall\nusing HDF5, JLD\n@pyimport matplotlib.gridspec as gspec\n\npush!(LOAD_PATH,\".\")\nimport PoissonHMM\nimport PoissonMixtureModel\n\n\"\"\"\nSimple comparison between HMM and mixture model.\n\"\"\"\nfunction test_comparison()\n    #########################\n    ## load data\n    file_name = \"../data/timeseries.jld\"\n    X = load(file_name)[\"obs\"]\n    N = length(X)\n\n    #########################\n    ## Poison HMM\n\n    ## set model\n    K = 2 #  number of mixture components\n    alpha_phi = 10.0 * ones(K)\n    alpha_A = 100.0 * eye(K) + 1.0*ones(K, K)\n    cmp = [PoissonHMM.Gam(1.0, 0.01), PoissonHMM.Gam(1.0, 0.01)]\n    bhmm = PoissonHMM.BHMM(K, alpha_phi, alpha_A, cmp)\n    \n    ## inference\n    max_iter = 100\n    tic()\n    Z_est_hmm, post_bhmm = PoissonHMM.learn_VI(X, bhmm, max_iter)\n    toc()\n\n    #########################\n    ## Poison Mixture Model\n\n    ## set model\n    K = 2 #  number of mixture components\n    alpha_phi = 10.0 * ones(K)\n    cmp = [PoissonMixtureModel.Gam([1.0], 0.01), PoissonMixtureModel.Gam([1.0], 0.01)]\n    bpmm = PoissonMixtureModel.BPMM(1, K, alpha_phi, cmp)\n    \n    ## inference\n    max_iter = 100\n    tic()\n    Z_est_pmm, post_bpmm = PoissonMixtureModel.learn_VI(reshape(X, 1, N), bpmm, max_iter)\n    toc()\n\n    #########################\n    ## Compare results\n    figure(\"Hidden Markov Model vs Mixture Model\")\n    subplot(3,1,1);plot(X);ylabel(\"data\")\n    subplot(3,1,2);fill_between(1:N, reshape(Z_est_hmm[1,:]', N), zeros(N));ylim([0.0, 1.0]);ylabel(\"S (PHMM)\")\n    subplot(3,1,3);fill_between(1:N, reshape(Z_est_pmm[1,:]', N), zeros(N));ylim([0.0, 1.0]);ylabel(\"S (PMM)\")\n    show()\nend\n\ntest_comparison()\n"
  },
  {
    "path": "src/demo_PoissonMixtureModel.jl",
    "content": "###################################\n## Example code\n## for Bayesian Poisson Mixture Model\n\npush!(LOAD_PATH,\".\")\nusing PyPlot, PyCall\nimport PoissonMixtureModel\n\n\"\"\"\nVisualize data & estimation in 2D space.\n\"\"\"\nfunction visualize_2D(X::Matrix{Float64}, S::Matrix{Float64}, S_est::Matrix{Float64}, text)\n    cmp = get_cmap(\"jet\")\n\n    K1 = size(S, 1)\n    K2 = size(S_est, 1)\n    col1 = [pycall(cmp.o, PyAny, Int(round(val)))[1:3] for val in linspace(0,255,K1)]    \n    col2 = [pycall(cmp.o, PyAny, Int(round(val)))[1:3] for val in linspace(0,255,K2)]    \n\n    f, (ax1, ax2) = subplots(1,2,num=text)\n    f[:clf]()\n    f, (ax1, ax2) = subplots(1,2,num=text)\n    \n    for k in 1 : K1\n        ax1[:scatter](X[1, S[k,:].==1], X[2, S[k,:].==1], color=col1[k])\n    end\n    ax1[:set_title](\"truth\")\n    \n    for k in 1 : K2\n        ax2[:scatter](X[1, S_est[k,:].==1], X[2, S_est[k,:].==1], color=col2[k])\n    end\n\n    ax2[:set_title](\"estimation\")\nend\n\nfunction draw_hist(ax, X, S, label)\n    counts, bins, patches = ax[:hist](X', 20)\n    for i in 1 : length(patches)\n        if counts[i] > 0\n            S_tmp = S[:,bins[i] .<= X[1,:] .<= bins[i+1]]\n            S_sum = sum(S_tmp, 2) / sum(S_tmp)\n            patches[i][:set_facecolor]((S_sum[1], 0, S_sum[2]))\n        end\n    end\n    ax[:set_title](label)\nend\n\n\"\"\"\nVisualize data & estimation using 1D histogram.\n\"\"\"\nfunction visualize_1D(X::Matrix{Float64}, S::Matrix{Float64}, S_est::Matrix{Float64})\n    # separated figures\n    f1, ax1 = subplots(1,1,num=\"observation\")\n    f2, ax2 = subplots(1,1,num=\"estimation\")\n    f1[:clf]()\n    f2[:clf]()\n    _, ax1 = subplots(1,1,num=\"observation\")\n    _, ax2 = subplots(1,1,num=\"estimation\")\n    ax1[:hist](X', 20)\n    ax1[:set_title](\"observation\")\n    draw_hist(ax2, X, S_est, \"estimation\")    \nend\n\n\"\"\"\nRun a test script for 1D data clustering.\n\"\"\"\nfunction test_1D()\n    ## set model\n    D = 1 # data dimension, must be 1.\n    K = 2 #  number of mixture components, must be 2.\n    alpha = 100.0 * ones(K)\n    cmp = [PoissonMixtureModel.Gam(1.0*ones(D), 0.01) for i in 1 : K]\n    bpmm = PoissonMixtureModel.BPMM(D, K, alpha, cmp)\n    \n    ## generate data\n    N = 1000\n    pmm = PoissonMixtureModel.sample_PMM(bpmm)\n    X, S = PoissonMixtureModel.sample_data(pmm, N)\n    \n    ## inference\n    max_iter = 100\n    tic()\n    S_est, post_bpmm, VB = PoissonMixtureModel.learn_VI(X, bpmm, max_iter)\n    #S_est, post_bpmm, VB = PoissonMixtureModel.learn_GS(X, bpmm, max_iter)\n    #S_est, post_bpmm, VB = PoissonMixtureModel.learn_CGS(X, bpmm, max_iter)\n    toc()\n\n    ## plot\n    visualize_1D(X, S, S_est)\n\n    figure(\"ELBO\")\n    clf()\n    plot(VB)\n    ylabel(\"ELBO\")\n    xlabel(\"iterations\")\n    show()\nend\n\n\"\"\"\nRun a test script for 2D data clustering.\n\"\"\"\nfunction test_2D()\n    ## set model\n    D = 2 # data dimension, must be 2.\n    K = 8 # number of mixture components\n    #K = 5\n\n    alpha = 100.0 * ones(K)\n    cmp = [PoissonMixtureModel.Gam(1.0*ones(D), 0.01) for i in 1 : K]\n    bpmm = PoissonMixtureModel.BPMM(D, K, alpha, cmp)\n    \n    ## generate data\n    N = 300\n    pmm = PoissonMixtureModel.sample_PMM(bpmm)\n    X, S = PoissonMixtureModel.sample_data(pmm, N)\n    \n    ## inference\n    max_iter = 100\n    tic()\n    S_est, post_bpmm, VB = PoissonMixtureModel.learn_VI(X, bpmm, max_iter)\n    #S_est, post_bpmm, VB = PoissonMixtureModel.learn_GS(X, bpmm, max_iter)\n    
#S_est, post_bpmm, VB = PoissonMixtureModel.learn_CGS(X, bpmm, max_iter)\n    toc()\n\n    ## plot\n    visualize_2D(X, S, PoissonMixtureModel.winner_takes_all(S_est), \"2D plot\")\n\n    # VB check\n    figure(\"ELBO\")\n    clf()\n    plot(VB)\n    ylabel(\"ELBO\")\n    xlabel(\"iterations\")\n    show()\nend\n\ntest_1D()\n#test_2D()\n\n"
  },
  {
    "path": "src/demo_PolynomialRegression.jl",
    "content": "#################################\n## Bayesian model selection demo\n## for polynomial regression\n\nusing PyPlot, PyCall\nusing Distributions\n\nfunction poly(X_raw, M)\n    N = size(X_raw, 1)\n    X = zeros(M, N)\n    for m in 0 : M - 1\n        X[m+1,:] = X_raw.^m\n    end\n    return X\nend\n\nfunction learn_bayes(X_raw, Y, M, sig2_y, Sig_w, X_lin)\n    X = poly(X_raw, M)\n    N = size(X_raw, 1)\n    \n    # calc posterior\n    Sig_w_h = inv(X*inv(sig2_y*eye(N))*X' + inv(Sig_w))\n    mu_w_h = Sig_w_h * (X * inv(sig2_y * eye(N)) * Y)\n\n    # calc predictive\n    X_test = poly(X_lin, M)\n    Y_est = (mu_w_h'*X_test)'\n    sig2_y_prd = sig2_y + diag(X_test'Sig_w_h*X_test)\n    \n    # calc evidence\n    evidence = -0.5*(sum(Y)*inv(sig2_y) +N*log.(sig2_y) + N*log.(2*pi)\n                     + logdet(Sig_w)\n                     - (mu_w_h'*inv(Sig_w_h)*mu_w_h)[1] - logdet(Sig_w_h)\n                     )\n    return Y_est, sqrt.(sig2_y_prd), evidence\nend\n\nfunction test()\n    # linspace\n    X_lin = linspace(-1, 7, 200)\n    \n    # generate data\n    N = 10\n    sig2_y = 0.1\n    X = 2*pi*rand(N)\n    Y_true = [sin.(x) for x in X_lin]\n    Y_obs = [sin.(x) + sig2_y * randn() for x in X]\n    \n    dims = [1, 2, 3, 4, 5, 10]\n    \n    # learning via Bayes\n    sig2_w = 1.0\n    Y_bayes = [learn_bayes(X, Y_obs, m, sig2_y, sig2_w*eye(m), X_lin) for m in dims]\n\n    #############\n    # compute evidences\n    evidence = [learn_bayes(X, Y_obs, m, sig2_y, sig2_w*eye(m), X_lin)[3] for m in dims]\n    figure(\"evidence\")\n    clf()\n    plot(1:length(dims), evidence)\n    xticks(1:length(dims),dims)\n    ylabel((\"\\$\\\\ln p(\\\\bf{Y}|\\\\bf{X})\\$\"), fontsize=20)\n    xlabel((\"\\$M\\$\"), fontsize=20)\n    \n    #############\n    # visualize\n    x_min = X_lin[1]\n    x_max = X_lin[end]\n    y_min = -4\n    y_max = 4\n    \n    figure(\"prediction\")\n    clf()\n    for k in 1 : 6\n        subplot(230 + k)\n        plot(X_lin, Y_bayes[k][1])\n        plot(X_lin, Y_bayes[k][1] + Y_bayes[k][2], \"c--\")\n        plot(X_lin, Y_bayes[k][1] - Y_bayes[k][2], \"c--\")\n        plot(X, Y_obs, \"ko\")\n        xlim([x_min, x_max])\n        ylim([y_min, y_max])\n        text(x_max - 2.5, y_max - 1, @sprintf(\"M=%d\", dims[k]))\n    end\n    show()\nend\n\ntest()\n\n"
  },
  {
    "path": "src/demo_Simple2DGauss.jl",
    "content": "###################################\n## Simple VI & GS for 2D Gaussian\n\nusing PyPlot\nusing Distributions\n\nfunction calc_KL(mu1, lambda1, mu2, lambda2)\n    D = size(mu1, 1)\n    px_lnqx = 0.5 * logdet(lambda2) - 0.5 * ((mu1 - mu2)' * lambda2 * (mu1 - mu2) + trace(lambda2 * inv(lambda1)))\n    px_lnpx = 0.5 * logdet(lambda1) - 0.5 * D\n    KL = - (px_lnqx - px_lnpx)\n    return KL[1]\nend\n\nfunction plot_results(result, truth)\n    N = size(result, 1)\n    H = Int(ceil(sqrt(N)))\n    W = Int(ceil(N / H))\n    for i in 1 : H\n        for j in 1 : W\n            n = (i - 1) * W + j\n            if n <= N\n                subplot(H, W, n)\n                title(\"$n of $N\")\n                plot_gaussian(truth[1], truth[2], \"b\", \"\\$p(z)\\$\")\n                plot_gaussian(result[n][1], result[n][2], \"r\", \"\\$p(z)\\$\")\n            end\n        end\n    end\nend\n\nfunction plot_lines(X)\n    D, N = size(X)\n    X_d = zeros(D, 2*N + 1)\n    X_d[:,1] = X[:,1]\n    for i in 1 : N\n        X_d[1, 2*i - 1] = X[1, i]\n        X_d[1, 2*i] = X[1, i]\n        X_d[2, 2*i] = X[2, i]\n        X_d[2, 2*i + 1] = X[2, i]\n    end\n    plot(X[1,:], X[2,:], \"oy\")\n    plot(X_d[1,1:2*N], X_d[2,1:2*N], \"--y\")\nend\n\nfunction plot_gaussian(Mu, Sigma, col, label)\n    res = 100\n    plot(Mu[1], Mu[2], \"x\", color=col)\n    \n    F = eigfact(Sigma)\n    vec = F.vectors\n    val = F.values\n    dw = 2*pi/res\n    w = dw * (0 : res)\n    \n    c = 1.0\n    a = sqrt(c*val[1])\n    b = sqrt(c*val[2])\n    P1 = a*cos.(w)\n    P2 = b*sin.(w)\n    P = Mu .+ vec'*vcat(P1', P2')\n    plot(P[1, :], P[2, :], \"-\", color=col, label=label)\nend\n\n\"\"\"\nVariational inference for 2D Gauss.\n\"\"\"\nfunction main_VI()\n    ## creat truth distribution\n    D = 2 # dimension\n    theta = 2.0*pi/12 # tilt\n    A = reshape([cos.(theta), -sin.(theta),\n                 sin.(theta), cos.(theta)],\n                2, 2)\n    mu = [0.0, 0.0]\n    lambda = inv(A * inv(reshape([1,0,0,10], 2, 2)) * A')\n    \n    ## initialize\n    #mu_h = randn(D)\n    mu_h = [-0.5, 0.3]\n    lambda_h = zeros(D,D)\n    \n    ## main iteration\n    max_iter = 10\n    KL = NaN * Array{Float64, 1}(max_iter)\n    result = Array{Any, 1}(max_iter)\n    for i in 1 : max_iter\n        ## update\n        mu_h[1] = mu[1] - inv(lambda[1,1])*lambda[1,2] * (mu_h[2] - mu[2])\n        \n        lambda_h[1,1] = lambda[1,1]\n        mu_h[2] = mu[2] - inv(lambda[2,2])*lambda[2,1] * (mu_h[1] - mu[1])\n        lambda_h[2,2] = lambda[2,2]\n        \n        ## calculate KL divergeince\n        KL[i] = calc_KL(mu_h, lambda_h, mu, lambda)\n\n        ## store the results\n        result[i] = [deepcopy(mu_h), deepcopy(inv(lambda_h))]\n    end\n\n    ## visualize results\n    figure(\"result per iteration (VI)\")\n    clf()\n    plot_results(result, (mu, inv(lambda)))\n\n    figure(\"result (VI)\")\n    clf()\n    plot_gaussian(mu, inv(lambda), \"b\", \"\\$p(\\\\bf{z})\\$\")\n    plot_gaussian(result[end][1], result[end][2], \"r\", \"\\$q(\\\\bf{z})\\$\")\n    xlabel(\"\\$z_1\\$\", fontsize=20)\n    ylabel(\"\\$z_2\\$\", fontsize=20)\n    legend(fontsize=16)\n    \n    figure(\"KL divergence (VI)\")\n    clf()\n    plot(1:max_iter, KL)\n    ylabel(\"KL divergence\", fontsize=16)\n    xlabel(\"iteration\", fontsize=16)\n    show()\nend\n\n\"\"\"\nGibbs sampling for 2D Gauss.\n\"\"\"\nfunction main_GS()\n    ## creat truth distribution\n    D = 2 # dimension\n    theta = 2.0*pi/12 # tilt\n    A = reshape([cos.(theta), -sin.(theta),\n           
      sin.(theta), cos.(theta)],\n                2, 2)\n    mu = [0.0, 0.0]\n    #lambda = inv(A * inv(reshape([1,0,0,10], 2, 2)) * A')\n    lambda = inv(A * inv(reshape([1,0,0,100], 2, 2)) * A')\n\n    ## initialize\n    #max_iter = 1000\n    max_iter = 50\n    X = randn(D, max_iter)\n    mu_h = randn(D)\n    \n    ## main iteration\n    KL = NaN * Array{Float64, 1}(max_iter)\n    for i in 2 : max_iter\n        ## update\n        mu_h[1] = mu[1] - inv(lambda[1,1])*lambda[1,2] * (X[2,i-1] - mu[2])\n        X[1, i] = rand(Normal(mu_h[1], sqrt(inv(lambda[1,1]))))\n        \n        mu_h[2] = mu[2] - inv(lambda[2,2])*lambda[2,1] * (X[1,i] - mu[1])\n        X[2, i] = rand(Normal(mu_h[2], sqrt(inv(lambda[2,2]))))        \n        \n        if i > D\n            KL[i] = calc_KL(mean(X[:,1:i], 2), inv(cov(X[:,1:i], 2)), mu, lambda)\n        end\n    end\n    \n    ## visualize results\n    expt_mu = mean(X, 2)\n    expt_Sigma = cov(X, 2)\n\n    figure(\"samples (GS)\")\n    clf()\n    plot_lines(X)\n    plot_gaussian(mu, inv(lambda), \"b\", \"\\$p(\\\\bf{z})\\$\")\n    plot_gaussian(expt_mu, expt_Sigma, \"r\", \"\\$q(\\\\bf{z})\\$\")\n    xlabel(\"\\$z_1\\$\", fontsize=20)\n    ylabel(\"\\$z_2\\$\", fontsize=20)\n    legend(fontsize=16)\n    \n    figure(\"KL divergence (GS)\")\n    clf()\n    plot(1:max_iter, KL)\n    ylabel(\"KL divergence\", fontsize=16)\n    xlabel(\"sample size\", fontsize=16)\n    show()\nend\n\nmain_VI()\nmain_GS()\n"
  },
  {
    "path": "src/demo_SimpleFitting.jl",
    "content": "#####################################\n## Simple function fitting demo\n\nusing PyPlot, PyCall\nusing Distributions\n\n# true param\nW = Array([1.0, 0.0, 1.0])\n\n# generate data\nsigma = 0.5\nN = 20\nX = linspace(-0.4,2.4,N)\nY = [W[1] + W[2]*x + W[3]*x^2 + sigma*randn() for x in X]\nX_min = minimum(X)\nX_max = maximum(X)\n\n# regression1\nX_all = linspace(X_min, X_max, 100)\nW1 = sum(Y.*X) / sum(X.^2)\nY1 = [W1*x for x in X_all]\n\n# regression2\nX2 = zeros(3, N)\nX2[1,:] = 1\nX2[2,:] = X\nX2[3,:] = X.^2\nW2 = inv(X2*X2') * X2*Y\nY2 = [W2[1] + W2[2]*x + W2[3]*x^2 for x in X_all]\n\n# show data\nfigure()\nplot(X_all, Y1, \"b-\")\nplot(X_all, Y2, \"g-\")\nplot(X, Y, \"ko\")\nlegend([\"model1\",\"model2\",\"data\"], loc=\"upper left\", fontsize=16)\nxlabel(\"\\$x\\$\", fontsize=20)\nylabel(\"\\$y\\$\", fontsize=20)\nshow()\n"
  },
  {
    "path": "src/demo_nonconjugate.jl",
    "content": "\nusing PyPlot, PyCall\nusing Distributions\nimport StatsFuns.logsumexp\nPyDict(matplotlib[\"rcParams\"])[\"mathtext.fontset\"] = \"cm\"\nPyDict(matplotlib[\"rcParams\"])[\"mathtext.rm\"] = \"serif\"\nPyDict(matplotlib[\"rcParams\"])[\"lines.linewidth\"] = 1.5\nPyDict(matplotlib[\"rcParams\"])[\"font.family\"] = \"TakaoPGothic\"\n\nfunction expt(a, b, sigma, Y, X, N_s)\n    S = rand(Gamma(a, 1.0/b), N_s)\n    C = mean([exp(sum(logpdf.(Normal(s, sigma), Y))) for s in S])\n    curve = [exp(sum(logpdf.(Normal(mu, sigma), Y))) * pdf(Gamma(a, 1.0/b), mu) for mu in X]\n    m = mean([s*exp(sum(logpdf.(Normal(s, sigma), Y)))/C for s in S])\n    v = mean([(s-m)^2 * exp(sum(logpdf.(Normal(s, sigma), Y)))/C for s in S])\n    return curve/C, m, v\nend\n\nX = linspace(-5, 10, 1000)\n\na = 2.0\nb = 2.0\nmu = 1.0\nsigma=1.0\n\n# data\nN = 10\nY = rand(Normal(mu, sigma), N)\n\n# calc posterior\nN_s = 100000\nposterior, m, v = expt(a, b, sigma, Y, X, N_s)\n\na_h = m^2 / v\nb_h = m / v\n\nfigure()\nplot(X, pdf(Normal(mu,sigma), X))\nplot(X, pdf(Gamma(a,1.0/b), X))\nplot(X, posterior)\nplot(X, pdf(Gamma(a_h,1.0/b_h), X))\nplot(Y, 0.02*ones(N), \"o\")\nlegend([\"generator\", \"prior\", \"posterior\", \"approx\", \"samples\"])\n#legend([\"データ生成分布\", \"事前分布\", \"事後分布\", \"近似分布\", \"データ\"], fontsize=12)\nxlim([-3, 6])\nylim([0, 1.8])\n\n\n"
  }
]