[
  {
    "path": ".gitignore",
    "content": ".ipynb_checkpoints\n.Rhistory\n__pycache__"
  },
  {
    "path": "Chapter01/Interfacing_R.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.3'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# %% [markdown]\n# ## The next cell will get a ~65 MB data file 'sequence.index', you only need to run the cell once\n\n# %%\n# !rm sequence.index 2>/dev/null\n# !wget -nd http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/20130502.phase3.sequence.index -O sequence.index\n\n# %% [markdown]\n# # Interfacing with R\n\n# %%\nimport os\n\nfrom IPython.display import Image\n\nimport rpy2.robjects as robjects\nimport rpy2.robjects.lib.ggplot2 as ggplot2\nfrom rpy2.robjects.functions import SignatureTranslatedFunction\n\nimport pandas as pd\n\nimport rpy2.robjects as ro\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.conversion import localconverter\n\n# %%\nread_delim = robjects.r('read.delim')\nseq_data = read_delim('sequence.index', header=True, stringsAsFactors=False)\n#In R:\n#  seq.data <- read.delim('sequence.index', header=TRUE, stringsAsFactors=FALSE)\n\n# %%\nprint('This data frame has %d columns and %d rows' % (seq_data.ncol, seq_data.nrow))\nprint(seq_data.colnames)\n#In R:\n#  print(colnames(seq.data))\n#  print(nrow(seq.data))\n#  print(ncol(seq.data))\n\nprint('Columns in Python %d ' % robjects.r.ncol(seq_data)[0])\n\n#access some functions\nas_integer = robjects.r('as.integer')\nmatch = robjects.r.match\n\nmy_col = match('READ_COUNT', seq_data.colnames)[0] # Vector returned\nprint('Type of read count before as.integer: %s' % seq_data[my_col - 1].rclass[0])\nseq_data[my_col - 1] = as_integer(seq_data[my_col - 1])\nprint('Type of read count after as.integer: %s' % seq_data[my_col - 1].rclass[0])\n\nmy_col = match('BASE_COUNT', seq_data.colnames)[0] # Vector returned\nseq_data[my_col - 1] = as_integer(seq_data[my_col - 1])\n\nmy_col = 
match('CENTER_NAME', seq_data.colnames)[0]\nseq_data[my_col - 1] = robjects.r.toupper(seq_data[my_col - 1])\nrobjects.r.assign('seq.data', seq_data)\nrobjects.r('print(c(\"Column names in R: \",colnames(seq.data)))')\n\nrobjects.r('seq.data <- seq.data[seq.data$WITHDRAWN==0, ]')\n#Lets remove all withdrawn sequences\n\nrobjects.r(\"seq.data <- seq.data[, c('STUDY_ID', 'STUDY_NAME', 'CENTER_NAME', 'SAMPLE_ID', 'SAMPLE_NAME', 'POPULATION', 'INSTRUMENT_PLATFORM', 'LIBRARY_LAYOUT', 'PAIRED_FASTQ', 'READ_COUNT', 'BASE_COUNT', 'ANALYSIS_GROUP')]\")\n#Lets shorten the dataframe\n\n#Population as factor\nrobjects.r('seq.data$POPULATION <- as.factor(seq.data$POPULATION)')\n\n# %%\nggplot2.theme = SignatureTranslatedFunction(ggplot2.theme,\n                                            init_prm_translate = {'axis_text_x': 'axis.text.x'})\nbar = ggplot2.ggplot(seq_data) + ggplot2.geom_bar() + ggplot2.aes_string(x='CENTER_NAME') + ggplot2.theme(axis_text_x=ggplot2.element_text(angle=90, hjust=1, size=40), axis_text_y=ggplot2.element_text(size=40), text=ggplot2.element_text(size=40))\nrobjects.r.png('out.png', width=16, height=9, units=\"in\", res=600) \nbar.plot()\ndev_off = robjects.r('dev.off')\ndev_off()\nImage(filename='out.png')\n\n# %%\n#Get Yoruba and CEU\nrobjects.r('yri_ceu <- seq.data[seq.data$POPULATION %in% c(\"YRI\", \"CEU\") & seq.data$BASE_COUNT < 2E9 & seq.data$READ_COUNT < 3E7, ]')\nyri_ceu = robjects.r('yri_ceu')\n\n# %%\nscatter = ggplot2.ggplot(yri_ceu) + ggplot2.aes_string(x='BASE_COUNT', y='READ_COUNT', shape='factor(POPULATION)', col='factor(ANALYSIS_GROUP)') + ggplot2.geom_point()\nrobjects.r.png('out.png', width=16, height=9, units=\"in\", res=600)\nscatter.plot()\ndev_off = robjects.r('dev.off')\ndev_off()\nImage(filename='out.png')\n\n# %%\nwith localconverter(ro.default_converter + pandas2ri.converter):\n  pd_yri_ceu = ro.conversion.rpy2py(yri_ceu)\ndel pd_yri_ceu['PAIRED_FASTQ']\n# no_paired = pandas2ri.py2ri(pd_yri_ceu)\nwith 
localconverter(ro.default_converter + pandas2ri.converter):\n  no_paired = ro.conversion.py2rpy(pd_yri_ceu)\nrobjects.r.assign('no.paired', no_paired)\nrobjects.r(\"print(colnames(no.paired))\")\n\n# %%\n"
  },
  {
    "path": "Chapter01/R_magic.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.3'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# %% [markdown]\n# ## The cell below will get the data file, you only need to run it once \n\n# %% [markdown]\n# (you do not need to do this if you have done it in the Interfacing_R notebook)\n\n# %%\n# !rm sequence.index 2>/dev/null\n# !wget -nd http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/20130502.phase3.sequence.index -O sequence.index\n\n# %%\nimport rpy2.robjects as robjects\nimport rpy2.robjects.lib.ggplot2 as ggplot2\n\n# %load_ext rpy2.ipython\n\n# %% language=\"R\"\n# seq.data <- read.delim('sequence.index', header=TRUE, stringsAsFactors=FALSE)\n# seq.data$READ_COUNT <- as.integer(seq.data$READ_COUNT)\n# seq.data$BASE_COUNT <- as.integer(seq.data$BASE_COUNT)\n\n# %%\n# seq_data = %R seq.data\nprint(type(seq_data))  #pandas dataframe???\n\n# %%\nmy_col = list(seq_data.columns).index(\"CENTER_NAME\")\nseq_data['CENTER_NAME'] = seq_data['CENTER_NAME'].apply(lambda x: x.upper())\n\n# %%\n# %R -i seq_data\n# %R print(colnames(seq_data))\n\n# %% language=\"R\"\n# seq_data <- seq_data[seq_data$WITHDRAWN==0, ]\n# seq_data$POPULATION <- as.factor(seq_data$POPULATION)\n\n# %% language=\"R\"\n# bar <- ggplot(seq_data) +  aes(factor(CENTER_NAME)) + geom_bar() + theme(axis.text.x = element_text(angle = 90, hjust = 1))\n# print(bar)\n\n# %% language=\"R\"\n# seq_data$POPULATION <- as.factor(seq_data$POPULATION)\n# yri_ceu <- seq_data[seq_data$POPULATION %in% c(\"YRI\", \"CEU\") & seq_data$BASE_COUNT < 2E9 & seq_data$READ_COUNT < 3E7, ]\n\n# %% language=\"R\"\n# scatter <- ggplot(yri_ceu, aes(x=BASE_COUNT, y=READ_COUNT, col=factor(ANALYSIS_GROUP), shape=POPULATION)) + geom_point()\n# print(scatter)\n\n# %% language=\"R\"\n# library(gridExtra)\n# library(grid)\n# 
g <- grid.arrange(bar, scatter, ncol=1)\n# g\n\n# %% language=\"R\"\n# png('fig.png')\n# g\n# dev.off()\n"
  },
  {
    "path": "Chapter01/base_setup.sh",
    "content": "conda create -n bioinformatics_base python=3.9.7 \n\nconda activate bioinformatics_base\nconda config --add channels bioconda \nconda config --add channels conda-forge\nconda install \\\n\tbiopython==1.79 \\\n\tjupyterlab==3.2.1 \\\n\tjupytext==1.13 \\\n\tmatplotlib==3.4.3 \\\n\tnumpy==1.21.3 \\\n\tpandas==1.3.4 \\\n\tscipy==1.7.1\nconda list --explicit > bioinformatics_base.txt\n"
  },
  {
    "path": "Chapter01/bioinformatics_base.txt",
    "content": "# This file may be used to create an environment using:\n# $ conda create --name <env> --file <this file>\n# platform: linux-64\n@EXPLICIT\nhttps://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2021.10.8-ha878542_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.36.1-hea4e1c9_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libgfortran5-11.2.0-h5c6108e_11.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-11.2.0-he4da1e4_11.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/mysql-common-8.0.27-ha770c72_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pandoc-2.15-h7f98852_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/tzdata-2021e-he74cb21_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-11.2.0-h69a702a_11.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libgomp-11.2.0-h1d223b6_11.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-1_gnu.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-11.2.0-h1d223b6_11.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/alsa-lib-1.2.3-h516909a_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/expat-2.4.1-h9c3ff4c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/icu-68.2-h9c3ff4c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/jbig-2.1-h7f98852_2003.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/jpeg-9d-h36c2ea0_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/lerc-3.0-h9c3ff4c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.8-h7f98852_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h9c3ff4c_4.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libiconv-1.16-h516909a_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libogg-1.3.4-h7f98852_1.tar.bz2\nhttp
s://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.18-pthreads_h8fe5266_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libopus-1.3.1-h7f98852_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.18-h36c2ea0_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libuuid-2.32.1-h7f98852_1000.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.2.1-h7f98852_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.11-h36c2ea0_1013.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.9.3-h9c3ff4c_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/ncurses-6.2-h58526e2_4.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/nspr-4.32-h9c3ff4c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/openssl-1.1.1l-h7f98852_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pcre-8.45-h9c3ff4c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-h36c2ea0_1001.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.9-h7f98852_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.3-h7f98852_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/xz-5.2.5-h516909a_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h516909a_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/gettext-0.19.8.1-h73d1719_1008.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-12_linux64_openblas.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20191231-he28a2e2_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libevent-2.1.10-h9b69904_4.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libvorbis-1.3.7-h9c3ff4c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libxcb-1.13-h7f98852_1003.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/readline-8.1-h46c0cb4_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/zeromq-4.3.4-h9c
3ff4c_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/zlib-1.2.11-h36c2ea0_1013.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-12_linux64_openblas.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libglib-2.70.0-h174f98d_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-12_linux64_openblas.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libllvm11-11.1.0-hf817b99_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.37-h21135ba_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libxml2-2.9.12-h72842e0_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/sqlite-3.36.0-h9cd32fc_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/tk-8.6.11-h27826a3_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.0-ha95c52a_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/freetype-2.10.4-h0708190_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/glib-tools-2.70.0-h780b84a_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/gstreamer-1.18.5-h76c114f_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/krb5-1.19.2-hcc1bbae_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libclang-11.1.0-default_ha53f305_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libtiff-4.3.0-h6f004c6_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libxkbcommon-1.0.3-he3ba5ed_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/mysql-libs-8.0.27-hfa10184_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/nss-3.69-hb5efdd6_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/python-3.9.7-hb7a2778_3_cpython.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/async_generator-1.10-py_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/attrs-21.2.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/backcall-0.2.0-pyh9f0ad1d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/back
ports-1.0-py_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/charset-normalizer-2.0.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/decorator-5.1.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/entrypoints-0.3-pyhd8ed1ab_1003.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/fontconfig-2.13.1-hba837de_1005.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/glib-2.70.0-h780b84a_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/gst-plugins-base-1.18.5-hf529b03_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/idna-3.1-pyhd3deb0d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/ipython_genutils-0.2.0-py_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/json5-0.9.5-pyh9f0ad1d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/lcms2-2.12-hddcbb42_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/libpq-13.3-hd57d9b9_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.5.1-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/olefile-0.46-pyh9f0ad1d_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.4.0-hb52868f_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/parso-0.8.2-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pickleshare-0.7.5-py_1003.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.11.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd3deb0d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pycparser-2.20-pyh9f0ad1d_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pyparsing-3.0.3-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/python_abi-3.9-2_cp39.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarc
h/pytz-2021.3-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/send2trash-1.8.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/six-1.16.0-pyh6c4a22f_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/testpath-0.5.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/toml-0.10.2-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/traitlets-5.1.1-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/typing_extensions-3.10.0.2-pyha770c72_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-py_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/wheel-0.37.0-pyhd8ed1ab_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/zipp-3.6.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/babel-2.9.1-pyh44b312d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/certifi-2021.10.8-py39hf3d152e_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/cffi-1.14.6-py39h4bc2ebd_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/chardet-4.0.0-py39hf3d152e_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/cycler-0.10.0-py_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/dbus-1.13.6-h48d8840_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/debugpy-1.4.1-py39he80948d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/importlib-metadata-4.8.1-py39hf3d152e_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/jedi-0.18.0-py39hf3d152e_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/jupyter_core-4.9.1-py39hf3d152e_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.3.2-py39h1a9c180_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/markdown-it-py-1.1.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.0.1-py39h3811e60_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.1.3-pyhd8ed1ab_0.tar.bz2\nhttps://
conda.anaconda.org/conda-forge/linux-64/mistune-0.8.4-py39h3811e60_1004.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/numpy-1.21.3-py39hdbf815f_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/packaging-21.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pexpect-4.8.0-pyh9f0ad1d_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pillow-8.3.2-py39ha612740_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyqt5-sip-4.19.18-py39he80948d_7.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyrsistent-0.17.3-py39h3811e60_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pysocks-1.7.1-py39hf3d152e_3.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.8.2-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0-py39h3811e60_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyzmq-22.3.0-py39h37b5a0c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/setuptools-58.2.0-py39hf3d152e_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/sniffio-1.2.0-py39hf3d152e_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/tornado-6.1-py39h3811e60_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/websocket-client-0.57.0-py39hf3d152e_4.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/anyio-3.3.4-py39hf3d152e_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-21.1.0-py39h3811e60_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/backports.functools_lru_cache-1.6.4-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/biopython-1.79-py39h3811e60_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/bleach-4.1.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/brotlipy-0.7.0-py39h3811e60_1001.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/cryptography-35.0.0-py39h95dcef6_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jinja2-3.0.2-pyhd8ed1ab_0.tar
.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jsonschema-4.1.2-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jupyter_client-7.0.6-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.4.3-py39h2fa2bec_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.2.8-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pandas-1.3.4-py39hde0f152_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pip-21.3.1-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pygments-2.10.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/qt-5.12.9-hda022c4_4.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/scipy-1.7.1-py39hee8e79c_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/terminado-0.12.1-py39hf3d152e_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.1.2-pyh9f0ad1d_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/nbformat-5.1.3-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/pyopenssl-21.0.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyqt-impl-5.12.3-py39h0fcd23e_7.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.5-pyh9f0ad1d_2.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jupytext-1.13.0-pyh6002c4b_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/nbclient-0.5.4-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.21-pyha770c72_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyqtchart-5.12-py39h0fcd23e_7.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/pyqtwebengine-5.12.1-py39h0fcd23e_7.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.7-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/ipython-7.28.0-py39hef51801_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/nbconvert-6.2.0-py39hf3d152e_0.tar.bz2\nhttps://c
onda.anaconda.org/conda-forge/linux-64/pyqt-5.12.3-py39hf3d152e_7.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/requests-2.26.0-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/ipykernel-6.4.2-py39hef51801_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/linux-64/matplotlib-3.4.3-py39hf3d152e_1.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/requests-unixsocket-0.2.0-py_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jupyter_server-1.11.1-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/notebook-6.4.5-pyha770c72_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.8.2-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/nbclassic-0.3.4-pyhd8ed1ab_0.tar.bz2\nhttps://conda.anaconda.org/conda-forge/noarch/jupyterlab-3.2.1-pyhd8ed1ab_0.tar.bz2\n\n"
  },
  {
    "path": "Chapter02/.gitignore",
    "content": "*png\nVAERSDataUseGuide_en_September2021.pdf"
  },
  {
    "path": "Chapter02/Arrow.py",
    "content": "import gzip\nimport pandas as pd\nfrom pyarrow import csv\nimport pyarrow.compute as pc\n\nvdata_pd = pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\ncolumns = list(vdata_pd.columns)\nvdata_pd.info(memory_usage=\"deep\")\n\nvdata_arrow = csv.read_csv(\"2021VAERSDATA.csv.gz\")\ntot_bytes = sum([\n    vdata_arrow[name].nbytes\n    for name in vdata_arrow.column_names])\nprint(f\"Total {tot_bytes // (1024 ** 2)} MB\")\n\nfor name in vdata_arrow.column_names:\n    arr_bytes = vdata_arrow[name].nbytes\n    arr_type = vdata_arrow[name].type\n    pd_bytes = vdata_pd[name].memory_usage(index=False, deep=True)\n    pd_type = vdata_pd[name].dtype\n    print(\n        name,\n        arr_type, arr_bytes // (1024 ** 2),\n        pd_type, pd_bytes // (1024 ** 2),)\n\n\n# %timeit pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\n# %timeit csv.read_csv(\"2021VAERSDATA.csv.gz\")\n\n\n# REMOVE SYMPTOM_TEXT\n\n\nvdata_pd = pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\", usecols=lambda x: x != \"SYMPTOM_TEXT\")\ndata_pd.info(memory_usage=\"deep\")\n\n#columns.remove(\"SYMPTOM_TEXT\")\nvdata_arrow = csv.read_csv(\n    \"2021VAERSDATA.csv.gz\",\n     convert_options=csv.ConvertOptions(include_columns=columns))\nvdata_arrow.nbytes\n\n# %timeit pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\", usecols=lambda x: x != \"SYMPTOM_TEXT\")\n# %timeit csv.read_csv(\"2021VAERSDATA.csv.gz\", convert_options=csv.ConvertOptions(include_columns=columns))\n\nvdata = vdata_arrow.to_pandas()\nvdata.info(memory_usage=\"deep\")\n\n\n\n# Theres more\nvdata = vdata_arrow.to_pandas(self_destruct=True)\n\n"
  },
  {
    "path": "Chapter02/Matplotlib.py",
    "content": "import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nvdata = pd.read_csv(\n    \"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\",\n    usecols=lambda name: name != \"SYMPTOM_TEXT\")\n\nnum_rows = len(vdata)\nperc_nan = {}\nfor col_name in vdata.columns:\n    num_nans = len(vdata[col_name][vdata[col_name].isna()])\n    perc_nan[col_name] = 100 * num_nans / num_rows\nlabels = perc_nan.keys()\nbar_values = list(perc_nan.values())\nx_positions = np.arange(len(labels))\n\nfig = plt.figure()\nfig.suptitle(\"Fraction of empty values per column\")\nax = fig.add_subplot()\nax.bar(x_positions, bar_values)\nax.set_ylabel(\"Percent of empty values\")\nax.set_xlabel(\"Column\")\nax.set_xticks(x_positions)\nax.set_xticklabels(labels)\nax.legend()\nfig.savefig(\"naive_chart.png\")\n\n# OO interface vs matlab...\n\nfig = plt.figure(figsize=(16, 9), tight_layout=True, dpi=600)\nfig.suptitle(\"Fraction of empty values per column\", fontsize=\"48\")\nax = fig.add_subplot()\nb1 = ax.bar(x_positions, bar_values)\nax.set_ylabel(\"Percent of empty values\", fontsize=\"xx-large\")\nax.set_xticks(x_positions)\nax.set_xticklabels(labels, rotation=45, ha=\"right\")\nax.set_ylim(0, 100)\nax.set_xlim(-0.5, len(labels))\nfor i, x in enumerate(x_positions):\n    ax.text(\n        x, 2, \"%.1f\" % bar_values[i], rotation=90,\n        va=\"bottom\", ha=\"center\",\n        backgroundcolor=\"white\")\nfig.text(0.2, 0.01, \"Column\", fontsize=\"xx-large\")\nfig.savefig(\"cleaner_chart.png\")\n\n\ndead = vdata[vdata.DIED == \"Y\"]\nvax = pd.read_csv(\"2021VAERSVAX.csv.gz\", encoding=\"iso-8859-1\").set_index(\"VAERS_ID\")\n\nvax.groupby(\"VAX_TYPE\").size().sort_values()\n\nvax_dead = dead.join(vax, on=\"VAERS_ID\", how=\"inner\")\n# join on id, discuss\n\n\nvax_dead.iloc[0]\n\ndead_counts = vax_dead[\"VAX_TYPE\"].value_counts()\nlarge_values = dead_counts[dead_counts >= 10]\nother_sum = dead_counts[dead_counts < 10].sum()\nlarge_values 
= large_values.append(pd.Series({\"OTHER\": other_sum}))\n\ndistance_df = vax_dead[vax_dead.DATEDIED.notna() & vax_dead.VAX_DATE.notna()]\ndistance_df[\"DATEDIED\"] = pd.to_datetime(distance_df[\"DATEDIED\"])\ndistance_df[\"VAX_DATE\"] = pd.to_datetime(distance_df[\"VAX_DATE\"])\ndistance_df = distance_df[distance_df.DATEDIED >= \"2021\"]\ndistance_df = distance_df[distance_df.VAX_DATE >= \"2021\"]\ndistance_df = distance_df[distance_df.DATEDIED >= distance_df.VAX_DATE]\ntime_distances = distance_df[\"DATEDIED\"] - distance_df[\"VAX_DATE\"]\ntime_distances_d = time_distances.astype(int) / (10**9 * 60 * 60 * 24)\n\ndate_died = pd.to_datetime(vax_dead[vax_dead.DATEDIED.notna()][\"DATEDIED\"])\ndate_died = date_died[date_died >= \"2021\"]\ndate_died_counts = date_died.value_counts().sort_index()\ncum_deaths = date_died_counts.cumsum()\n\nstate_dead = vax_dead[vax_dead[\"STATE\"].notna()][[\"STATE\", \"SEX\"]]\ntop_states = sorted(state_dead[\"STATE\"].value_counts().head(10).index)\ntop_state_dead = state_dead[state_dead[\"STATE\"].isin(top_states)].groupby([\"STATE\", \"SEX\"]).size()#.reset_index()\ntop_state_dead.loc[\"MN\", \"U\"] = 0  # XXXX\ntop_state_dead = top_state_dead.sort_index().reset_index()\ntop_state_females = top_state_dead[top_state_dead.SEX == \"F\"][0]\ntop_state_males = top_state_dead[top_state_dead.SEX == \"M\"][0]\ntop_state_unk = top_state_dead[top_state_dead.SEX == \"U\"][0]\n\nfig, ((vax_cnt, time_dist), (death_time, state_reps)) = plt.subplots(\n    2, 2,\n    figsize=(16, 9), tight_layout=True, dpi=600)\n\nvax_cnt.set_title(\"Vaccines involved in deaths\")\nwedges, texts = vax_cnt.pie(large_values)\nvax_cnt.legend(wedges, large_values.index, loc=\"lower left\")\n\ntime_dist.hist(time_distances_d, bins=50)\ntime_dist.set_title(\"Days between vaccine administration and death\")\ntime_dist.set_xlabel(\"Days\")\ntime_dist.set_ylabel(\"Observations\")\n\ndeath_time.plot(date_died_counts.index, date_died_counts, 
\".\")\ndeath_time.set_title(\"Deaths over time\")\ndeath_time.set_ylabel(\"Daily deaths\")\ndeath_time.set_xlabel(\"Date\")\ntw = death_time.twinx()\ntw.plot(cum_deaths.index, cum_deaths)\ntw.set_ylabel(\"Cummulative deaths\")\n\nstate_reps.set_title(\"Deaths per state stratified by sex\")\nstate_reps.bar(top_states, top_state_females, label=\"Females\")\nstate_reps.bar(top_states, top_state_males, label=\"Males\", bottom=top_state_females)\nstate_reps.bar(top_states, top_state_unk, label=\"Unknown\",\n               bottom=top_state_females.values + top_state_males.values)\nstate_reps.legend()\nstate_reps.set_xlabel(\"State\")\nstate_reps.set_ylabel(\"Deaths\")\n\nfig.savefig(\"summary.png\")\n\nfig\n\n\n\n"
  },
  {
    "path": "Chapter02/NumPy.py",
    "content": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nvdata = pd.read_csv(\n    \"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\n\nvdata[\"STATE\"] = vdata[\"STATE\"].str.upper()\ntop_states = pd.DataFrame({\n    \"size\": vdata.groupby(\"STATE\").size().sort_values(ascending=False).head(5)}).reset_index()\ntop_states[\"rank\"] = top_states.index\ntop_states = top_states.set_index(\"STATE\")\ntop_vdata = vdata[vdata[\"STATE\"].isin(top_states.index)]\ntop_vdata[\"state_code\"] = top_vdata[\"STATE\"].apply(\n    lambda state: top_states[\"rank\"].at[state]\n).astype(np.uint8)\ntop_vdata = top_vdata[top_vdata[\"AGE_YRS\"].notna()]\ntop_vdata.loc[:,\"AGE_YRS\"] = top_vdata[\"AGE_YRS\"].astype(int)\ntop_states\n\nage_state = top_vdata[[\"state_code\", \"AGE_YRS\"]]\nage_state[\"state_code\"]\nstate_code_arr = age_state[\"state_code\"].values\ntype(state_code_arr), state_code_arr.shape, state_code_arr.dtype\n\nage_state[\"AGE_YRS\"]\nage_arr = age_state[\"AGE_YRS\"].values\ntype(age_arr), age_arr.shape, age_arr.dtype\n\nage_arr.max()\n\nage_state_mat = np.zeros((5,6), dtype=np.uint64)\nfor row in age_state.itertuples():\n    age_state_mat[row.state_code, row.AGE_YRS//20] += 1\nage_state_mat\n\ncal = age_state_mat[0,:]\nkids = age_state_mat[:,0]\n\ndef compute_frac(arr_1d):\n    return arr_1d / arr_1d.sum()\n\nfrac_age_stat_mat = np.apply_along_axis(compute_frac, 1, age_state_mat)\n\nperc_age_stat_mat = frac_age_stat_mat * 100\nperc_age_stat_mat = perc_age_stat_mat.astype(np.uint8)\nperc_age_stat_mat\n\nperc_age_stat_mat = perc_age_stat_mat[:, :5]\nperc_age_stat_mat\n\n\nfig = plt.figure()\nax = fig.add_subplot()\nax.matshow(perc_age_stat_mat, cmap=plt.get_cmap(\"Greys\"))\nax.set_yticks(range(5))\nax.set_yticklabels(top_states.index)\nax.set_xticks(range(6))\nax.set_xticklabels([\"0-19\", \"20-39\", \"40-59\", \"60-79\", \"80-99\", \"100-119\"])\nfig.savefig(\"matrix.png\")\n\n\n"
  },
  {
    "path": "Chapter02/Pandas_Basic.py",
    "content": "# # Using Pandas to process vaccine adverse events\n#\n# ## Data Access\n#\n# Go to https://vaers.hhs.gov/data/datasets.html and Download 2021 **zip** Data. Please do not download only the CSV File.\n#\n# Drop it on the directory where this notebook is.\n\n\n# !unzip 2021VAERSData.zip\n# !gzip -9 *csv\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nvdata = pd.read_csv(\n    \"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\n\nvdata.columns\n\nvdata.dtypes\n\nvdata.shape\n\nvdata.iloc[0]\n\nvdata = vdata.set_index(\"VAERS_ID\")\n\nvdata.loc[916600]\n\nvdata.head(3)\n\nvdata.iloc[:3]\n\nvdata.iloc[:5, 2:4]\n\nvdata[\"AGE_YRS\"].max()\n\nvdata.AGE_YRS.max()\n\nvdata[\"AGE_YRS\"].sort_values().plot(use_index=False)\n\nvdata[\"AGE_YRS\"].sort_values().plot(use_index=False)\n\nfig, ax = plt.subplots(1, 2, sharey=True, dpi=300)\nfig.suptitle(\"Age of adverse events\")\nvdata[\"AGE_YRS\"].sort_values().plot(\n    use_index=False, ax=ax[0],\n    xlabel=\"Obervation\", ylabel=\"Age\")\nvdata[\"AGE_YRS\"].plot.hist(bins=20, orientation=\"horizontal\")\nfig.savefig(\"adverse.png\")\n\nvdata[\"AGE_YRS\"].dropna().apply(lambda x: int(x)).value_counts()\n# not documented\n\nvdata.DIED.value_counts(dropna=False)\n# NA is a problem, how to be implemented\n\n\nvdata[\"is_dead\"] = (vdata.DIED == \"Y\")\n\n\ndead = vdata[vdata.is_dead]\nvax = pd.read_csv(\"2021VAERSVAX.csv.gz\", encoding=\"iso-8859-1\").set_index(\"VAERS_ID\")\nprint(vax.columns)\nprint(vax.shape)\nprint(vax.VAX_TYPE.unique())\n\nvax.groupby(\"VAX_TYPE\").size().sort_values()\n\nvax19 = vax[vax.VAX_TYPE == \"COVID19\"]\nvax19_dead = dead.join(vax19)\n# join on id, discuss\nvax19_dead.index.value_counts()\n\nbaddies = vax19_dead.groupby(\"VAX_LOT\").size().sort_values(ascending=False)\nfor i, (lot, cnt) in enumerate(baddies.items()):\n    print(lot, cnt, len(vax19_dead[vax19_dead.VAX_LOT == lot].groupby(\"STATE\")))\n    if i == 10:\n        break\n\n\n# The data above is not totally 
correct - at least in terms of interpretation, but for that we need to check the next recipe\n"
  },
  {
    "path": "Chapter02/Pandas_Join.py",
    "content": "# # Pandas advanced\n\nimport numpy as np\nimport pandas as pd\n\n# # Code to sample original data\n#\n# ```\n# vdata = pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\n# vdata.sample(frac=0.9).to_csv(\"vdata_sample.csv.gz\", index=False)\n# vax = pd.read_csv(\"2021VAERSVAX.csv.gz\", encoding=\"iso-8859-1\")\n# vax.sample(frac=0.9).to_csv(\"vax_sample.csv.gz\", index=False)\n# ```\n\nvdata = pd.read_csv(\"vdata_sample.csv.gz\") # No encoding\nvax = pd.read_csv(\"vax_sample.csv.gz\")\n\nvdata_with_vax = vdata.join(\n    vax.set_index(\"VAERS_ID\"),\n    on=\"VAERS_ID\",\n    how=\"inner\")\n\nlen(vdata), len(vax), len(vdata_with_vax)\n\nlost_vdata = vdata.loc[~vdata.index.isin(vdata_with_vax.index)]\nlost_vdata\n\nlost_vax = vax[~vax[\"VAERS_ID\"].isin(vdata_with_vax[\"VAERS_ID\"])]\nlost_vax\n\n\n# Left, Right and outer caveats\n\n\nvdata_with_vax_left = vdata.join(\n    vax.set_index(\"VAERS_ID\"),\n    on=\"VAERS_ID\")\n\nvdata_with_vax_left.groupby(\"VAERS_ID\").size().sort_values()\n\nlen(vdata_with_vax_left), len(vdata_with_vax_left.VAERS_ID.unique())\n\n# +\n#vdata_all = pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\n#vax_all = pd.read_csv(\"2021VAERSVAX.csv.gz\", encoding=\"iso-8859-1\")\n# -\n\ndead = vdata[vdata.DIED == \"Y\"]\nvax19 = vax[vax.VAX_TYPE == \"COVID19\"]\nvax19_dead = vax19.join(dead.set_index(\"VAERS_ID\"), on=\"VAERS_ID\", how=\"right\")\n# join on id, discuss\n\nlen(vax19), len(dead), len(vax19_dead)\n\nlen(vax19_dead[vax19_dead.VAERS_ID.duplicated()])\n\nlen(vax19_dead) - len(dead)\n\nvax19_dead[\"STATE\"] = vax19_dead[\"STATE\"].str.upper()\ndead_lot = vax19_dead[[\"VAERS_ID\", \"VAX_LOT\", \"STATE\"]].set_index([\"VAERS_ID\", \"VAX_LOT\"])\ndead_lot_clean = dead_lot[~dead_lot.index.duplicated()]\ndead_lot_clean = dead_lot_clean.reset_index()\ndead_lot_clean[dead_lot_clean.VAERS_ID.isna()]\n\nbaddies = dead_lot_clean.groupby(\"VAX_LOT\").size().sort_values(ascending=False)\nfor i, (lot, 
cnt) in enumerate(baddies.items()):\n    print(lot, cnt, len(dead_lot_clean[dead_lot_clean.VAX_LOT == lot].groupby(\"STATE\")))\n    if i == 10:\n        break\n"
  },
  {
    "path": "Chapter02/Pandas_Memory.py",
    "content": "# # Pandas advanced\n\nimport numpy as np\nimport pandas as pd\n\nvdata = pd.read_csv(\"2021VAERSDATA.csv.gz\", encoding=\"iso-8859-1\")\n\nvdata.info(memory_usage=\"deep\")\n\nfor name in vdata.columns:\n    col_bytes = vdata[name].memory_usage(index=False, deep=True)\n    col_type = vdata[name].dtype\n    print(\n        name,\n        col_type, col_bytes // (1024 ** 2))\n\nvdata.DIED.memory_usage(index=False, deep=True)\n\nvdata.DIED.fillna(False).astype(bool).memory_usage(index=False, deep=True)\n\nvdata.STATE.unique()\n\nvdata[\"STATE\"] = vdata.STATE.str.upper()\n\nstates = list(vdata[\"STATE\"].unique())\nstates\n\nvdata[\"encoded_state\"] = vdata.STATE.apply(lambda state: states.index(state))\nvdata[\"encoded_state\"] = vdata[\"encoded_state\"].astype(np.uint8)\n\nvdata[[\"encoded_state\", \"STATE\"]].head(10)\n\nvdata[\"STATE\"].memory_usage(index=False, deep=True)\n\nvdata[\"encoded_state\"].memory_usage(index=False, deep=True)\n\nvdata.index\n\nstates = list(pd.read_csv(\n    \"vdata_sample.csv.gz\",\n    converters={\n       \"STATE\": lambda state: state.upper()  # You need to know the states in advance\n    },\n    usecols=[\"STATE\"]\n)[\"STATE\"].unique())\n\nvdata = pd.read_csv(\n    \"vdata_sample.csv.gz\",\n    index_col=\"VAERS_ID\",\n    converters={\n       \"DIED\": lambda died: died == \"Y\",\n       \"STATE\": lambda state: states.index(state.upper())\n    },\n    usecols=lambda name: name != \"SYMPTOM_TEXT\"\n)\nvdata[\"STATE\"] = vdata[\"STATE\"].astype(np.uint8)\nvdata.info(memory_usage=\"deep\")\n\n\n"
  },
  {
    "path": "Chapter03/Accessing_Databases.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nfrom Bio import Entrez, Medline, SeqIO\n\n# ### Do not forget to inform NCBI of your email address (change below)\n\nEntrez.email = \"put@your_email.here\" \n\n#This gives you the list of available databases\nhandle = Entrez.einfo()\nrec = Entrez.read(handle)\nprint(rec)\n\nhandle = Entrez.esearch(db=\"nucleotide\", term='CRT[Gene Name] AND \"Plasmodium falciparum\"[Organism]')\nrec_list = Entrez.read(handle)\nif int(rec_list['RetMax']) < int(rec_list['Count']):\n    handle = Entrez.esearch(db=\"nucleotide\", term='CRT[Gene Name] AND \"Plasmodium falciparum\"[Organism]',\n                            retmax=rec_list['Count'])\n    rec_list = Entrez.read(handle)\n\nid_list = rec_list['IdList']\nhdl = Entrez.efetch(db='nucleotide', id=id_list, rettype='gb', retmax=rec_list['Count'])\n\nrecs = list(SeqIO.parse(hdl, 'gb'))\n\nfor rec in recs:\n    if rec.name == 'KM288867':\n        break\nprint(rec.name)\nprint(rec.description)\n\nfor feature in rec.features:\n    if feature.type == 'gene':\n        print(feature.qualifiers['gene'])\n    elif feature.type == 'exon':\n        loc = feature.location\n        print('Exon', loc.start, loc.end, loc.strand)\n    else:\n        print('not processed:\\n%s' % feature)\n\nfor name, value in rec.annotations.items():\n    print('%s=%s' % (name, value))\n\nprint(len(rec.seq))\n\nrefs = rec.annotations['references']\nprint(refs)\nfor ref in refs:\n    if ref.pubmed_id != '':\n        print(ref.pubmed_id)\n        handle = Entrez.efetch(db=\"pubmed\", id=[ref.pubmed_id],\n                                rettype=\"medline\", retmode=\"text\")\n        records = Medline.parse(handle)\n        for med_rec in records:\n            for 
k, v in med_rec.items():\n                print('%s: %s' % (k, v))\n\n\n"
  },
  {
    "path": "Chapter03/Basic_Sequence_Processing.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nfrom Bio import Entrez, Seq, SeqIO, SeqRecord\n\nEntrez.email = \"put@your_email.here\" \nhdl = Entrez.efetch(db='nucleotide', id=['NM_002299'], rettype='gb')  # Lactase gene\n#for l in hdl:\n#    print l\ngb_rec = SeqIO.read(hdl, 'gb')\n\nfor feature in gb_rec.features:\n    if feature.type == 'CDS':\n        location = feature.location  # Note translation existing\ncds = SeqRecord.SeqRecord(gb_rec.seq[location.start:location.end], 'NM_002299', description='LCT CDS only')\n\nw_hdl = open('example.fasta', 'w')\nSeqIO.write([cds], w_hdl, 'fasta')\nw_hdl.close()\n\nrecs = SeqIO.parse('example.fasta', 'fasta')\nfor rec in recs:\n    seq = rec.seq\n    print(rec.description)\n    print(seq[:10])\n\nprint((seq[:12], seq[-12:]))\nrna = seq.transcribe()\nrna\n\nprot = seq.translate()\nprot\n\n\n"
  },
  {
    "path": "Chapter03/Filtering_SNPs.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Getting the necessary data\n\n# You will need to do this only once\n\n# !rm -rf centro.vcf.gz 2>/dev/null\n# !rm -rf standard.vcf.gz 2>/dev/null\n# !tabix -fh ftp://ngs.sanger.ac.uk/production/ag1000g/phase1/preview/ag1000g.AC.phase1.AR1.vcf.gz 3L:1-200000 |bgzip -c > centro.vcf.gz\n# !tabix -fh ftp://ngs.sanger.ac.uk/production/ag1000g/phase1/preview/ag1000g.AC.phase1.AR1.vcf.gz 3L:21000000-21200000 |bgzip -c > standard.vcf.gz       \n# !tabix -p vcf centro.vcf.gz\n# !tabix -p vcf standard.vcf.gz\n\n# # Recipe\n\n# +\nfrom collections import defaultdict\nimport functools\n\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom cyvcf2 import VCF\n\n\n# -\n\ndef do_window(recs, size, fun):\n    start = None\n    win_res = []\n    for rec in recs:\n        if not rec.is_snp or len(rec.ALT) > 1:\n            continue\n        if start is None:\n            start = rec.POS\n        my_win = 1 + (rec.POS - start) // size\n        while len(win_res) < my_win:\n            win_res.append([])\n        win_res[my_win - 1].extend(fun(rec))\n    return win_res\n\n\ndef apply_win_funs(wins, funs):\n    fun_results = []\n    for win in wins:\n        my_funs = {}\n        for name, fun in funs.items():\n            try:\n                my_funs[name] = fun(win)\n            except:\n                my_funs[name] = None\n        fun_results.append(my_funs)\n    return fun_results\n\n\nwins = {}\nsize = 2000\nnames = ['centro.vcf.gz', 'standard.vcf.gz']\nfor name in names:\n    recs = VCF(name)\n    wins[name] = do_window(recs, size, lambda x: [1])\n\nstats = {}\nfig, ax = plt.subplots(figsize=(16, 9), dpi=300, tight_layout=True)\nfor name, 
nwins in wins.items():\n    stats[name] = apply_win_funs(nwins, {'sum': sum})\n    x_lim = [i * size  for i in range(len(stats[name]))]\n    ax.plot(x_lim, [x['sum'] for x in stats[name]], label=name)\nax.legend()\nax.set_xlabel('Genomic location in the downloaded segment', fontsize='xx-large')\nax.set_ylabel('Number of variant sites (bi-allelic SNPs)', fontsize='xx-large')\nfig.suptitle('Number of bi-allelic SNPs along the genome', fontsize='xx-large')\nfig.savefig('bi.png')\n\n# +\nmq0_wins = {}\nsize = 5000\n\ndef get_sample(rec, annot, my_type):\n    return [v for v in rec.format(annot) if v > np.iinfo(my_type).min]\n\nfor name in names:\n    recs = VCF(name)\n    mq0_wins[name] = do_window(recs, size, functools.partial(get_sample, annot='MQ0', my_type=np.int32))\n# -\n\nstats = {}\ncolors = ['b', 'g']\ni = 0\nfig, ax = plt.subplots(figsize=(16, 9))\nfor name, nwins in mq0_wins.items():\n    stats[name] = apply_win_funs(nwins, {'median': np.median, '75': functools.partial(np.percentile, q=95)})\n    x_lim = [j * size  for j in range(len(stats[name]))]\n    ax.plot(x_lim, [x['median'] for x in stats[name]], label=name, color=colors[i])\n    ax.plot(x_lim, [x['75'] for x in stats[name]], '--', color=colors[i])\n    i += 1\n#ax.set_ylim(0, 40)\nax.legend()\nax.set_xlabel('Genomic location in the downloaded segment', fontsize='xx-large')\nax.set_ylabel('MQ0', fontsize='xx-large')\nfig.suptitle('Distribution of MQ0 along the genome', fontsize='xx-large')\nfig.savefig('MQ0.png')\n\n\ndef get_sample_relation(recs, f1, f2):\n    rel = defaultdict(int)\n    for rec in recs:\n        if not rec.is_snp:\n             continue\n        for pos in range(len(rec.genotypes)):\n            v1 = f1(rec, pos)\n            v2 = f2(rec, pos)\n            if v1 is None or v2 == np.iinfo(type(v2)).min:\n                continue  # We ignore Nones\n            rel[(v1, v2)] += 1\n            # careful with the size, floats: round?\n        #break\n    return rel\n\n\nrels = {}\nfor 
name in names:\n    recs = VCF(name)\n    rels[name] = get_sample_relation(\n        recs,\n        lambda rec, pos: 1 if rec.genotypes[pos][0] != rec.genotypes[pos][1] else 0,\n        lambda rec, pos: rec.format('DP')[pos][0])\n\n# +\nfig, ax = plt.subplots(figsize=(16, 9), dpi=300, tight_layout=True)\n\ndef plot_hz_rel(dps, ax, ax2, name, rel):\n    frac_hz = []\n    cnt_dp = []\n    for dp in dps:\n        hz = 0.0\n        cnt = 0\n\n        for khz, kdp in rel.keys():\n            if kdp != dp:\n                continue\n            cnt += rel[(khz, dp)]\n            if khz == 1:\n                hz += rel[(khz, dp)]\n        frac_hz.append(hz / cnt)\n        cnt_dp.append(cnt)\n    ax.plot(dps, frac_hz, label=name)\n    ax2.plot(dps, cnt_dp, '--', label=name)\n\nax2 = ax.twinx()\nfor name, rel in rels.items():\n    dps = list(set([x[1] for x in rel.keys()]))\n    dps.sort()\n    plot_hz_rel(dps, ax, ax2, name, rel)\nax.set_xlim(0, 75)\nax.set_ylim(0, 0.2)\nax2.set_ylabel('Quantity of calls', fontsize='xx-large')\nax.set_ylabel('Fraction of Heterozygote calls', fontsize='xx-large')\nax.set_xlabel('Sample Read Depth (DP)', fontsize='xx-large')\nax.legend()\nfig.suptitle('Number of calls per depth and fraction of calls which are Hz',\n             fontsize='xx-large')\nfig.savefig('hz.png')\n\n# -\n\ndef get_variant_relation(recs, f1, f2):\n    rel = defaultdict(int)\n    for rec in recs:\n        if not rec.is_snp:\n             continue\n        try:\n            v1 = f1(rec)\n            v2 = f2(rec)\n            if v1 is None or v2 is None:\n                continue  # We ignore Nones\n            rel[(v1, v2)] += 1\n            #careful with the size, floats: round?\n        except:\n            # This is outside the domain (typically None)\n            pass\n    return rel\n\n\n# +\naccepted_eff = ['INTERGENIC', 'INTRON', 'NON_SYNONYMOUS_CODING', 'SYNONYMOUS_CODING']\n\ndef eff_to_int(rec):\n    try:\n        annot = rec.INFO['EFF']\n        master_type = 
annot.split('(')[0]\n        return accepted_eff.index(master_type)\n    except ValueError:\n        return len(accepted_eff)\n\n\n# -\n\neff_mq0s = {}\nfor name in names:\n    recs = VCF(name)\n    eff_mq0s[name] = get_variant_relation(\n        recs,\n        lambda r: eff_to_int(r), lambda r: int(r.INFO['DP']))\n\nfig, ax = plt.subplots(figsize=(16,9), dpi=300, tight_layout=True)\nname = 'standard.vcf.gz'\nbp_vals = [[] for x in range(len(accepted_eff) + 1)]\nfor k, cnt in eff_mq0s[name].items():\n    my_eff, mq0 = k\n    bp_vals[my_eff].extend([mq0] * cnt)\n    #memory usage\n#print(bp_vals[-2])\nsns.boxplot(data=bp_vals, sym='', ax=ax)\nax.set_xticklabels(accepted_eff + ['OTHER'])\nax.set_ylabel('DP (variant)', fontsize='xx-large')\nfig.suptitle('Distribution of variant DP per SNP type',\n             fontsize='xx-large')\nfig.savefig('eff.png')\n\n"
  },
  {
    "path": "Chapter03/LCT.bed",
    "content": "track name=gene description=\"Gene information\"\n2\t135836529\t135837180\tENSE00002202258\t0\t-\n2\t135833110\t135833190\tENSE00001660765\t0\t-\n2\t135829592\t135829676\tENSE00001731451\t0\t-\n2\t135823900\t135824003\tENSE00001659892\t0\t-\n2\t135822019\t135822098\tENSE00001777620\t0\t-\n2\t135817340\t135818061\tENSE00001602826\t0\t-\n2\t135812310\t135812956\tENSE00000776576\t0\t-\n2\t135808442\t135809993\tENSE00001008768\t0\t-\n2\t135807127\t135807396\tENSE00000776573\t0\t-\n2\t135804766\t135805057\tENSE00000776572\t0\t-\n2\t135803929\t135804128\tENSE00000776571\t0\t-\n2\t135800606\t135800809\tENSE00000776570\t0\t-\n2\t135798028\t135798138\tENSE00003515081\t0\t-\n2\t135794640\t135794775\tENSE00001630333\t0\t-\n2\t135790657\t135790881\tENSE00001667885\t0\t-\n2\t135789570\t135789798\tENSE00001728878\t0\t-\n2\t135787839\t135788544\tENSE00001653704\t0\t-\n2\t135812310\t135812959\tENSE00001745158\t0\t-\n2\t135808442\t135809993\tENSE00001008768\t0\t-\n2\t135807127\t135807396\tENSE00000776573\t0\t-\n2\t135804766\t135805057\tENSE00000776572\t0\t-\n2\t135803929\t135804128\tENSE00000776571\t0\t-\n2\t135798028\t135798138\tENSE00003459353\t0\t-\n2\t135794336\t135794775\tENSE00001635523\t0\t-\n2\t135810168\t135810279\tENSE00001438557\t0\t-\n2\t135820190\t135820639\tENSE00001732580\t0\t+\n2\t135821674\t135823087\tENSE00001695040\t0\t+\n2\t135836529\t135837180\tNM_002299.2.1\t0\t-\n2\t135833110\t135833190\tNM_002299.2.2\t0\t-\n2\t135829592\t135829676\tNM_002299.2.3\t0\t-\n2\t135823900\t135824003\tNM_002299.2.4\t0\t-\n2\t135822019\t135822098\tNM_002299.2.5\t0\t-\n2\t135817340\t135818061\tNM_002299.2.6\t0\t-\n2\t135812310\t135812956\tNM_002299.2.7\t0\t-\n2\t135808442\t135809993\tNM_002299.2.8\t0\t-\n2\t135807127\t135807396\tNM_002299.2.9\t0\t-\n2\t135804766\t135805057\tNM_002299.2.10\t0\t-\n2\t135803929\t135804128\tNM_002299.2.11\t0\t-\n2\t135800606\t135800809\tNM_002299.2.12\t0\t-\n2\t135798028\t135798138\tNM_002299.2.13\t0\t-\n2\t135794640\t135794775\tNM_002299.2
.14\t0\t-\n2\t135790657\t135790881\tNM_002299.2.15\t0\t-\n2\t135789570\t135789798\tNM_002299.2.16\t0\t-\n2\t135787844\t135788544\tNM_002299.2.17\t0\t-\n2\t135836529\t135837169\tCCDS2178.117\t0\t-\n2\t135833110\t135833190\tCCDS2178.116\t0\t-\n2\t135829592\t135829676\tCCDS2178.115\t0\t-\n2\t135823900\t135824003\tCCDS2178.114\t0\t-\n2\t135822019\t135822098\tCCDS2178.113\t0\t-\n2\t135817340\t135818061\tCCDS2178.112\t0\t-\n2\t135812310\t135812956\tCCDS2178.111\t0\t-\n2\t135808442\t135809993\tCCDS2178.110\t0\t-\n2\t135807127\t135807396\tCCDS2178.19\t0\t-\n2\t135804766\t135805057\tCCDS2178.18\t0\t-\n2\t135803929\t135804128\tCCDS2178.17\t0\t-\n2\t135800606\t135800809\tCCDS2178.16\t0\t-\n2\t135798028\t135798138\tCCDS2178.15\t0\t-\n2\t135794640\t135794775\tCCDS2178.14\t0\t-\n2\t135790657\t135790881\tCCDS2178.13\t0\t-\n2\t135789570\t135789798\tCCDS2178.12\t0\t-\n2\t135788323\t135788544\tCCDS2178.11\t0\t-\n"
  },
  {
    "path": "Chapter03/Processing_BED_with_HTSeq.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nfrom collections import defaultdict\nimport re\nimport HTSeq\n\nlct_bed = HTSeq.BED_Reader('LCT.bed')\n\n# +\nfeature_types = defaultdict(int)\n\nfor rec in lct_bed:\n    last_rec = rec\n    feature_types[re.search('([A-Z]+)', rec.name).group(0)] += 1\n\nprint(feature_types)\n\n#Code specific to this dataset, document\n# -\n\nprint(last_rec)\nprint(last_rec.name)\nprint(type(last_rec))\ninterval = last_rec.iv\nprint(interval)\nprint(type(interval))\n\n# +\nprint(interval.chrom, interval.start, interval.end)\nprint(interval.strand)\nprint(interval.length)\nprint(interval.start_d)\nprint(interval.start_as_pos)\nprint(type(interval.start_as_pos))\n\n#talk about overlaps\n\n# -\n\nexon_start = None\nexon_end = None\nsizes = []\nfor rec in lct_bed:\n    if not rec.name.startswith('CCDS'):\n        continue\n    interval = rec.iv\n    exon_start = min(interval.start, exon_start or interval.start)\n    exon_end = max(interval.length, exon_end or interval.end)\n    sizes.append(interval.length)\nsizes.sort()\nprint(\"Num exons: %d / Begin: %d / End %d\" % (len(sizes), exon_start, exon_end))\nprint(\"Smaller exon: %d / Larger exon: %d / Mean size: %.1f\" % (sizes[0], sizes[-1], sum(sizes)/len(sizes)))\n\n\n"
  },
  {
    "path": "Chapter03/Working_with_BAM.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Getting the necessary data\n\n# You just need to do this only once\n\n# !rm -f NA18489.chrom20.ILLUMINA.bwa.YRI.exome.20121211.bam 2>/dev/null\n# !rm -f NA18489.chrom20.ILLUMINA.bwa.YRI.exome.20121211.bam.bai 2>/dev/null\n# !wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/exome_alignment/NA18489.chrom20.ILLUMINA.bwa.YRI.exome.20121211.bam\n# !wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/exome_alignment/NA18489.chrom20.ILLUMINA.bwa.YRI.exome.20121211.bam.bai\n\n# # The recipe\n\n# +\n#pip install pysam\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport pysam\n# -\n\nbam = pysam.AlignmentFile('NA18489.chrom20.ILLUMINA.bwa.YRI.exome.20121211.bam', 'rb')\n\nheaders = bam.header\nfor record_type, records in headers.items():\n    print (record_type)\n    for i, record in enumerate(records):\n        if type(record) == dict:\n            print('\\t%d' % (i + 1))\n            for field, value in record.items():\n                print('\\t\\t%s\\t%s' % (field, value))\n        else:\n            print('\\t\\t%s' % record)\n\n#0-based\nfor rec in bam:\n    if rec.cigarstring.find('M') > -1 and rec.cigarstring.find('S') > -1 and not rec.is_unmapped and not rec.mate_is_unmapped:\n        break\nprint(rec.query_name, rec.reference_id, bam.getrname(rec.reference_id), rec.reference_start, rec.reference_end)\nprint(rec.cigarstring)\nprint(rec.query_alignment_start, rec.query_alignment_end, rec.query_alignment_length)\nprint(rec.next_reference_id, rec.next_reference_start, rec.template_length)\nprint(rec.is_paired, rec.is_proper_pair, rec.is_unmapped, 
rec.mapping_quality)\nprint(rec.query_qualities)\nprint(rec.query_alignment_qualities)\nprint(rec.query_sequence)\n\ncounts = [0] * 76\nfor n, rec in enumerate(bam.fetch('20', 0, 10000000)):\n    for i in range(rec.query_alignment_start, rec.query_alignment_end):\n        counts[i] += 1\nfreqs = [100 * x / (n + 1) for x in counts]\nfig, ax = plt.subplots(figsize=(16,9), dpi=300, tight_layout=True)\nax.plot(range(1, 77), freqs)\nax.set_xlabel('Read distance', fontsize='xx-large')\nax.set_ylabel('PHRED score', fontsize='xx-large')\nfig.suptitle('Percentage of mapped calls as a function of the position from the start of the sequencer read', fontsize='xx-large')\nfig.savefig('map_perc.png')\n\nphreds = defaultdict(list)\nfor rec in bam.fetch('20', 0, None):\n    for i in range(rec.query_alignment_start, rec.query_alignment_end):\n        phreds[i].append(rec.query_qualities[i])\n\nmaxs = [max(phreds[i]) for i in range(76)]\ntops = [np.percentile(phreds[i], 95) for i in range(76)]\nmedians = [np.percentile(phreds[i], 50) for i in range(76)]\nbottoms = [np.percentile(phreds[i], 5) for i in range(76)]\nmedians_fig = [x - y for x, y in zip(medians, bottoms)]\ntops_fig = [x - y for x, y in zip(tops, medians)]\nmaxs_fig = [x - y for x, y in zip(maxs, tops)]\n\nfig, ax = plt.subplots(figsize=(16,9),dpi=300, tight_layout=True)\nax.stackplot(range(1, 77), (bottoms, medians_fig, tops_fig, maxs_fig))\nax.plot(range(1, 77), maxs, 'k-')\nax.set_xlabel('Read distance', fontsize='xx-large')\nax.set_ylabel('PHRED score', fontsize='xx-large')\nfig.suptitle('Distribution of PHRED scores as a function of the position in the read', fontsize='xx-large')\nfig.savefig('phred2.png')\n"
  },
  {
    "path": "Chapter03/Working_with_FASTQ.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Getting the necessary data\n\n# You just need to download this ~28 MB file only once\n\n# !rm -f SRR003265.filt.fastq.gz 2>/dev/null\n# !wget -nd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265.filt.fastq.gz\n\n# # The recipe\n\n# +\nfrom collections import defaultdict\nimport gzip\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom Bio import SeqIO\n# -\n\nrecs = SeqIO.parse(gzip.open('SRR003265.filt.fastq.gz', 'rt', encoding='utf-8'), 'fastq')\nrec = next(recs)\nprint(rec.id, rec.description, rec.seq)\nprint(rec.letter_annotations)\n\nrecs = SeqIO.parse(gzip.open('SRR003265.filt.fastq.gz', 'rt', encoding='utf-8'), 'fastq')\ncnt = defaultdict(int)\nfor rec in recs:\n    for letter in rec.seq:\n        cnt[letter] += 1\ntot = sum(cnt.values())\nfor letter, cnt in cnt.items():\n    print('%s: %.2f %d' % (letter, 100 * cnt / tot, cnt))\n\nrecs = SeqIO.parse(gzip.open('SRR003265.filt.fastq.gz', 'rt', encoding='UTF-8'), 'fastq')\nn_cnt = defaultdict(int)\nfor rec in recs:\n    for i, letter in enumerate(rec.seq):\n        pos = i + 1\n        if letter == 'N':\n            n_cnt[pos] += 1\nseq_len = max(n_cnt.keys())\npositions = range(1, seq_len + 1)\nfig, ax = plt.subplots(figsize=(16, 9), tight_layout=True, dpi=300)\nfig.suptitle('Number of N calls as a function of the distance from the start of the sequencer read', fontsize='xx-large')\nax.plot(positions, [n_cnt[x] for x in positions])\nax.set_xlim(1, seq_len)\nax.set_xlabel('Read distance', fontsize='xx-large')\nax.set_ylabel('Number of N Calls', fontsize='xx-large')\nfig.savefig('n_calls.png')\n\nrecs = SeqIO.parse(gzip.open('SRR003265.filt.fastq.gz', 'rt', 
encoding='utf-8'), 'fastq')\ncnt_qual = defaultdict(int)\nfor rec in recs:\n    for i, qual in enumerate(rec.letter_annotations['phred_quality']):\n        if i < 25:\n            continue\n        cnt_qual[qual] += 1\ntot = sum(cnt_qual.values())\nfor qual, cnt in cnt_qual.items():\n    print('%d: %.2f %d' % (qual, 100. * cnt / tot, cnt))\n\nrecs = SeqIO.parse(gzip.open('SRR003265.filt.fastq.gz', 'rt', encoding='utf-8'), 'fastq')\nqual_pos = defaultdict(list)\nfor rec in recs:\n    for i, qual in enumerate(rec.letter_annotations['phred_quality']):\n        if i < 25 or qual == 40:\n            continue\n        pos = i + 1\n        qual_pos[pos].append(qual)\nvps = []\nposes = list(qual_pos.keys())\nposes.sort()\nfor pos in poses:\n    vps.append(qual_pos[pos])\nfig, ax = plt.subplots(figsize=(16,9), dpi=300, tight_layout=True)\nsns.boxplot(data=vps, ax=ax)\nax.set_xticklabels([str(x) for x in range(26, max(qual_pos.keys()) + 1)])\nax.set_xlabel('Read distance', fontsize='xx-large')\nax.set_ylabel('PHRED score', fontsize='xx-large')\nfig.suptitle('Distribution of PHRED scores as a function of read distance', fontsize='xx-large')\nfig.savefig('phred.png')\n\n# # There is more...\n\n# ## Do this to download the paired end data\n\n# Be careful as this will be 1GB of data (and fully optional)\n\n# !rm -f SRR003265_1.filt.fastq.gz 2>/dev/null\n# !rm -f SRR003265_2.filt.fastq.gz 2>/dev/null\n# !wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265_1.filt.fastq.gz\n# !wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265_2.filt.fastq.gz\n\n# +\nf1 = gzip.open('SRR003265_1.filt.fastq.gz', 'rt', encoding='utf8')\nf2 = gzip.open('SRR003265_2.filt.fastq.gz', 'rt', encoding='utf8')\nrecs1 = SeqIO.parse(f1, 'fastq')\nrecs2 = SeqIO.parse(f2, 'fastq')\ncnt = 0\nfor rec1, rec2 in zip(recs1, recs2):\n    cnt +=1\n\nprint('Number of pairs: %d' % cnt)\n# -\n\n\n"
  },
  {
    "path": "Chapter03/Working_with_VCF.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Getting the necessary data\n\n# You just need to do this only once\n\n# !rm -f genotypes.vcf.gz 2>/dev/null\n# !tabix -fh ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20130502/supporting/vcf_with_sample_level_annotation/ALL.chr22.phase3_shapeit2_mvncall_integrated_v5_extra_anno.20130502.genotypes.vcf.gz 22:1-17000000|bgzip -c > genotypes.vcf.gz\n# !tabix -p vcf genotypes.vcf.gz\n\n# +\nfrom collections import defaultdict\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom cyvcf2 import VCF\n\n# +\nv = VCF('genotypes.vcf.gz')\nrec = next(v)\nprint('Variant Level information')\ninfo = rec.INFO\nfor info in rec.INFO:\n    print(info)\n\nprint('Sample Level information')\nfor fmt in rec.FORMAT:\n    print(fmt)\n\n# +\nv = VCF('genotypes.vcf.gz')\nsamples = v.samples\nprint(len(samples))  # Order change\n\nvariant = next(v)\nprint(variant.CHROM, variant.POS, variant.ID, variant.REF, variant.ALT, variant.QUAL, variant.FILTER)\nprint(variant.INFO)\nprint(variant.FORMAT)\nprint(variant.is_snp)\n\n#rec.format('DP')\n#rec.format('GT')\n\nstr_alleles = variant.gt_bases[0]\nalleles = variant.genotypes[0][0:2]\nis_phased = variant.genotypes[0][2]\nprint(str_alleles, alleles, is_phased)\nprint(variant.format('DP')[0])\n\n# +\nf = VCF('genotypes.vcf.gz')\n\nmy_type = defaultdict(int)\nnum_alts = defaultdict(int)\n\nfor variant in f:\n    my_type[variant.var_type, variant.var_subtype] += 1\n    if variant.var_type == 'snp':\n        num_alts[len(variant.ALT)] += 1\nprint(my_type)\nprint(num_alts)\n\n# +\nf = VCF('genotypes.vcf.gz')\n\nsample_dp = defaultdict(int)\nfor variant in f:\n    if not variant.is_snp or len(variant.ALT) != 1:\n        continue\n 
   for dp in variant.format('DP'):\n        #dp = int(dp)\n        sample_dp[dp] += 1\n# -\n\ndps = list(sample_dp.keys())\ndps.sort()\ndp_dist = [sample_dp[x] for x in dps]\nfig, ax = plt.subplots(figsize=(16, 9))\nax.plot(dp_dist[:50], 'r')\nax.axvline(dp_dist.index(max(dp_dist)))\n\n\n"
  },
  {
    "path": "Chapter04/2L.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\n# %matplotlib inline\n\nfrom collections import defaultdict\nimport gzip\n\nimport numpy as np\nimport matplotlib.pylab as plt\n# -\n\nnum_parents = 8\ndp_2L = np.load(gzip.open('DP_2L.npy.gz', 'rb'))\ndp_2L.shape\n\nfor i in range(num_parents):\n    print(np.median(dp_2L[:,i]), np.median(dp_2L[50000:150000,i]))\n\nwindow_size = 200000\nparent_DP_windows = [defaultdict(list) for i in range(num_parents)]\n\n\n# +\ndef insert_in_window(row):\n    for parent in range(num_parents):\n        parent_DP_windows[parent][row[-1] // window_size].append(row[parent])\n\ninsert_in_window_v = np.vectorize(insert_in_window, signature='(n)->()')\n_ = insert_in_window_v(dp_2L)\n# -\n\nfig, axs = plt.subplots(2, num_parents // 2, figsize=(16, 9), sharex=True, sharey=True, squeeze=True)\nfor parent in range(num_parents):\n    ax = axs[parent // 4][parent % 4]\n    parent_data = parent_DP_windows[parent]\n    ax.set_ylim(10, 40)\n    ax.plot(*zip(*[(win*window_size, np.mean(lst)) for win, lst in parent_data.items()]), '.')\n\n\n"
  },
  {
    "path": "Chapter04/Exploration.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport gzip\nimport pickle\nimport random\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas.plotting import scatter_matrix\n\n# %matplotlib inline\n# -\n\nfit = np.load(gzip.open('balanced_fit.npy.gz', 'rb'))\nordered_features = np.load(open('ordered_features', 'rb'), allow_pickle=True)\nnum_features = len(ordered_features)\nfit_df = pd.DataFrame(fit, columns=ordered_features + ['pos', 'error'])\nnum_samples = 80\ndel fit\n\nfig,ax = plt.subplots(figsize=(16,9))\n_ = fit_df.hist(column=ordered_features, ax=ax)\n\nfit_df['MeanDP'] = fit_df['DP'] / 80\nfig, ax = plt.subplots()\n_ = ax.hist(fit_df[fit_df['MeanDP']<50]['MeanDP'], bins=100)\n\nerrors_df = fit_df[fit_df['error'] == 1]\nok_df = fit_df[fit_df['error'] == 0]\n\nok_qual_above_df = ok_df[ok_df['QUAL']>0.005]\nerrors_qual_above_df = errors_df[errors_df['QUAL']>0.005]\nprint(ok_df.size, errors_df.size, ok_qual_above_df.size, errors_qual_above_df.size)\nprint(ok_qual_above_df.size / ok_df.size, errors_qual_above_df.size / errors_df.size)\n\nok_qd_above_df = ok_df[ok_df['QD']>0.05]\nerrors_qd_above_df = errors_df[errors_df['QD']>0.05]\nprint(ok_df.size, errors_df.size, ok_qd_above_df.size, errors_qd_above_df.size)\nprint(ok_qd_above_df.size / ok_df.size, errors_qd_above_df.size / errors_df.size)\n\nnot_bad_area_errors_df = errors_df[(errors_df['QUAL']<0.005)&(errors_df['QD']<0.05)]\n_ = scatter_matrix(not_bad_area_errors_df[['FS', 'ReadPosRankSum', 'MQ', 'HRun']], diagonal='kde', figsize=(16, 9), alpha=0.02)\n\nnot_bad_area_ok_df = ok_df[(ok_df['QUAL']<0.005)&(ok_df['QD']<0.05)]\n_ = scatter_matrix(not_bad_area_ok_df[['FS', 'ReadPosRankSum', 'MQ', 'HRun']], 
diagonal='kde', figsize=(16, 9), alpha=0.02)\n\nall_fit_df = pd.DataFrame(np.load(gzip.open('feature_fit.npy.gz', 'rb')), columns=ordered_features + ['pos', 'error'])\npotentially_good_corner_df = all_fit_df[(all_fit_df['QUAL']<0.005)&(all_fit_df['QD']<0.05)]\nall_errors_df=all_fit_df[all_fit_df['error'] == 1]\nprint(len(all_fit_df), len(all_errors_df), len(all_errors_df) / len(all_fit_df))\n\npotentially_good_corner_errors_df = potentially_good_corner_df[potentially_good_corner_df['error'] == 1]\nprint(len(potentially_good_corner_df), len(potentially_good_corner_errors_df), len(potentially_good_corner_errors_df) / len(potentially_good_corner_df))\nprint(len(potentially_good_corner_df)/len(all_fit_df))\n\n\n"
  },
  {
    "path": "Chapter04/Mendel.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nimport random\nimport matplotlib.pyplot as plt\n\n# # Mendelian simulations\n\nnum_sims = 100000\nnum_ofs = 20\n\n# +\nnum_hets_AA_AT = []\nfor sim in range(num_sims):\n    sim_hets = 0\n    for ofs in range(20):\n        sim_hets += 1 if random.choice([0, 1]) == 1 else 0\n    num_hets_AA_AT.append(sim_hets)\n    \nfig, ax = plt.subplots(1,1, figsize=(16,9))\nax.hist(num_hets_AA_AT, bins=range(20))\nprint(len([num_hets for num_hets in num_hets_AA_AT if num_hets==20]))\n# -\n\nnum_AAs_AT_AT = []\nnum_hets_AT_AT = []\nfor sim in range(num_sims):\n    sim_AAs = 0\n    sim_hets = 0\n    for ofs in range(20):\n        derived_cnt = sum(random.choices([0, 1], k=2))\n        sim_AAs += 1 if derived_cnt == 0 else 0\n        sim_hets += 1 if derived_cnt == 1 else 0\n    num_AAs_AT_AT.append(sim_AAs)\n    num_hets_AT_AT.append(sim_hets)\nfig, ax = plt.subplots(1,1, figsize=(16,9))\nax.hist([num_hets_AT_AT, num_AAs_AT_AT], histtype='step', fill=False, bins=range(20), label=['het', 'AA'])\nplt.legend()\n\n# # Balanced output\n\n# +\nimport gzip\nimport pickle\nimport random\n\nimport numpy as np\n# -\n\nmendelian_errors = pickle.load(gzip.open('mendelian_errors.pickle.gz', 'rb'))\nfeature_fit = np.load(gzip.open('feature_fit.npy.gz', 'rb'))\nordered_features = np.load(open('ordered_features', 'rb'), allow_pickle=True)\nnum_features = len(ordered_features)\n\nlen(mendelian_errors), len(list(filter(lambda x: x[0] > 0,mendelian_errors.values())))\n\ntotal_observations = len(mendelian_errors)\nerror_observations = len(list(filter(lambda x: x[0] > 0,mendelian_errors.values())))\nok_observations = total_observations - error_observations\nfraction_errors = 
error_observations/total_observations\nprint (total_observations, ok_observations, error_observations, 100*fraction_errors)\ndel mendelian_errors\n\n# +\nprob_ok_choice = error_observations / ok_observations\n\ndef accept_entry(row):\n    if row[-1] == 1:\n        return True\n    return random.random() <= prob_ok_choice\n\naccept_entry_v = np.vectorize(accept_entry, signature='(i)->()')\n\naccepted_entries = accept_entry_v(feature_fit)\nbalanced_fit = feature_fit[accepted_entries]\ndel feature_fit\nbalanced_fit.shape\nlen([x for x in balanced_fit if x[-1] == 1]), len([x for x in balanced_fit if x[-1] == 0])\n# -\n\nnp.save(gzip.open('balanced_fit.npy.gz', 'wb'), balanced_fit, allow_pickle=False, fix_imports=False)\n\n\n"
  },
  {
    "path": "Chapter04/Preparation.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# !wget ftp://ngs.sanger.ac.uk/production/ag1000g/phase1/AR3/variation/crosses/ar3/hdf5/ag1000g.crosses.phase1.ar3sites.3L.h5\n# !wget ftp://ngs.sanger.ac.uk/production/ag1000g/phase1/AR3/variation/crosses/ar3/hdf5/ag1000g.crosses.phase1.ar3sites.2L.h5\n\n\n# +\nimport pickle\nimport gzip\nimport random\n\nimport numpy as np\nimport h5py\nimport pandas as pd\n# -\n\nsamples = pd.read_csv('samples.tsv', sep='\\t')\nprint(len(samples))\nprint(samples['cross'].unique())\nprint(samples[samples['cross'] == 'cross-29-2'][['id', 'function']])\nprint(len(samples[samples['cross'] == 'cross-29-2']))\nprint(samples[samples['function'] == 'parent'])\n\n# # Chromosome arm 3L\n\n# +\nh5_3L = h5py.File('ag1000g.crosses.phase1.ar3sites.3L.h5', 'r')\nsamples_hdf5 = list(map(lambda sample: sample.decode('utf-8'), h5_3L['/3L/samples']))\n\ncalldata_genotype = h5_3L['/3L/calldata/genotype']\n\nMQ0 = h5_3L['/3L/variants/MQ0']\nMQ = h5_3L['/3L/variants/MQ']\nQD = h5_3L['/3L/variants/QD']\nCoverage = h5_3L['/3L/variants/Coverage']\nCoverageMQ0 = h5_3L['/3L/variants/CoverageMQ0']\nHaplotypeScore = h5_3L['/3L/variants/HaplotypeScore']\nQUAL = h5_3L['/3L/variants/QUAL']\nFS = h5_3L['/3L/variants/FS']\nDP = h5_3L['/3L/variants/DP']\nHRun = h5_3L['/3L/variants/HRun']\nReadPosRankSum = h5_3L['/3L/variants/ReadPosRankSum']\nmy_features = {\n    'MQ': MQ,\n    'QD': QD,\n    'Coverage': Coverage,\n    'HaplotypeScore': HaplotypeScore,\n    'QUAL': QUAL,\n    'FS': FS,\n    'DP': DP,\n    'HRun': HRun,\n    'ReadPosRankSum': ReadPosRankSum\n}\n\nnum_features = len(my_features)\nnum_alleles = h5_3L['/3L/variants/num_alleles']\nis_snp = h5_3L['/3L/variants/is_snp']\nPOS = 
h5_3L['/3L/variants/POS']\n\n\n# -\n\n#compute mendelian errors (biallelic)\ndef compute_mendelian_errors(mother, father, offspring):\n    num_errors = 0\n    num_ofs_problems = 0\n    if len(mother.union(father)) == 1:\n        # Mother and father are homo and the same\n        for ofs in offspring:\n            if len(ofs) == 2:\n                # Offspring is het\n                num_errors += 1\n                num_ofs_problems += 1\n            elif len(ofs.intersection(mother)) == 0:\n                # Offspring is homo, but opposite from parents\n                num_errors += 2\n                num_ofs_problems += 1\n    elif len(mother) == 1 and len(father) == 1:\n        # Mother and father are homo and different\n        for ofs in offspring:\n            if len(ofs) == 1:\n                # Homo, should be het\n                num_errors += 1\n                num_ofs_problems += 1\n    elif len(mother) == 2 and len(father) == 2:\n        # Both are het, individual offspring can be anything\n        pass\n    else:\n        # One is het, the other is homo\n        homo = mother if len(mother) == 1 else father\n        for ofs in offspring:\n            if len(ofs) == 1 and not ofs.intersection(homo):\n                # homo, but not including the allele from parent that is homo\n                num_errors += 1\n                num_ofs_problems += 1\n    return num_errors, num_ofs_problems\n\n\n# +\ndef acceptable_position_to_genotype():\n    for i, genotype in enumerate(calldata_genotype):\n        if is_snp[i] and num_alleles[i] == 2:\n            if len(np.where(genotype == -1)[0]) > 1:\n                # Missing data\n                continue\n            yield i\n\ndef acumulate(fun):\n    acumulator = {}\n    for res in fun():\n        if res is not None:\n            acumulator[res[0]] = res[1]\n    return acumulator\n\n\n# +\ndef get_family_indexes(samples_hdf5, cross_pd):\n    offspring = []\n    for i, individual in cross_pd.T.iteritems():\n      
  index = samples_hdf5.index(individual.id)\n        if individual.function == 'parent':\n            if individual.sex == 'M':\n                father = index\n            else:\n                mother = index\n        else:\n            offspring.append(index)\n    return {'mother': mother, 'father': father, 'offspring': offspring}\n\ncross_pd = samples[samples['cross'] == 'cross-29-2']\nfamily_indexes = get_family_indexes(samples_hdf5, cross_pd)\n\n# +\nmother_index = family_indexes['mother']\nfather_index = family_indexes['father']\noffspring_indexes = family_indexes['offspring']\nall_errors = {}\n\n\ndef get_mendelian_errors():\n    for i in acceptable_position_to_genotype():\n        genotype = calldata_genotype[i]\n        mother = set(genotype[mother_index])\n        father = set(genotype[father_index])\n        offspring = [set(genotype[ofs_index]) for ofs_index in offspring_indexes]\n        my_mendelian_errors = compute_mendelian_errors(mother, father, offspring)\n        yield POS[i], my_mendelian_errors\n\nmendelian_errors = acumulate(get_mendelian_errors)\n\npickle.dump(mendelian_errors, gzip.open('mendelian_errors.pickle.gz', 'wb'))\n\n# +\nordered_positions = sorted(mendelian_errors.keys())\nordered_features = sorted(my_features.keys())  #XXX on code?\nnum_features = len(ordered_features)\nfeature_fit = np.empty((len(ordered_positions), len(my_features) + 2), dtype=float)\n\nfor column, feature in enumerate(ordered_features):  # 'Strange' order\n    print(feature)\n    current_hdf_row = 0\n    for row, genomic_position in enumerate(ordered_positions):\n        while POS[current_hdf_row] < genomic_position:\n            current_hdf_row +=1\n        feature_fit[row, column] = my_features[feature][current_hdf_row]\n\nfor row, genomic_position in enumerate(ordered_positions):\n    feature_fit[row, num_features] = genomic_position\n    feature_fit[row, num_features + 1] = 1 if mendelian_errors[genomic_position][0] > 0 else 
0\n\nnp.save(gzip.open('feature_fit.npy.gz', 'wb'), feature_fit, allow_pickle=False, fix_imports=False)\npickle.dump(ordered_features, open('ordered_features', 'wb'))\n# -\n\n# # Chromosome arm 2L\n\nh5_2L = h5py.File('ag1000g.crosses.phase1.ar3sites.2L.h5', 'r')\nsamples_hdf5 = list(map(lambda sample: sample.decode('utf-8'), h5_2L['/2L/samples']))\ncalldata_DP = h5_2L['/2L/calldata/DP']\nPOS = h5_2L['/2L/variants/POS']\n\n\n# +\ndef get_parent_indexes(samples_hdf5, parents_pd):\n    parents = []\n    for i, individual in parents_pd.T.iteritems():\n        index = samples_hdf5.index(individual.id)\n        parents.append(index)\n    return parents\n\nparents_pd = samples[samples['function'] == 'parent']\nparent_indexes = get_parent_indexes(samples_hdf5, parents_pd)\n# -\n\nall_dps = []\nfor i, pos in enumerate(POS):\n    if random.random() > 0.01:\n        continue\n    pos_dp = calldata_DP[i]\n    parent_pos_dp = [pos_dp[parent_index] for parent_index in parent_indexes]\n    all_dps.append(parent_pos_dp + [pos])\nall_dps = np.array(all_dps)\nnp.save(gzip.open('DP_2L.npy.gz', 'wb'), all_dps, allow_pickle=False, fix_imports=False)\n\n\n"
  },
  {
    "path": "Chapter04/QIIME2_Metagenomics.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Important: Read this!\n#\n# This recipe does not work with the standard conda environment.\n#\n# If you are in the standard environment, do this:\n#\n# 1. Stop Jupyter\n# 2. Activate QIIME2 environment on conda\n# 3. Do `jupyter serverextension enable --py qiime2 --sys-prefix`\n# 4. Start Jupyter inside QIIME2 environment\n#\n# Note that other recipes will not work inside this environment. \n\n# # Check this out!\n#\n# This is based on [QIIME2 Fecal Microbiota Transplant example](https://docs.qiime2.org/2018.8/tutorials/fmt/) (for the command line). You are strongly advised to read it before proceeding.\n#\n# There is an [amazing example](http://nbviewer.jupyter.org/gist/tkosciol/29de5198a4be81559a075756c2490fde) of using the Artifact API using the \"Moving Pictures\" tutorial of QIIME 2 produced by Tomasz Kościółek. I use a more convoluted approach than Tomasz's in order to go a little deeper in terms of understanding of the Python internals. That is more of a learning experience on the internals than a practical recommendation. 
**My recommendation is to use Tomasz's dialect, not mine**.\n#\n\n# # Getting the data\n\n# !wget https://data.qiime2.org/2018.8/tutorials/fmt/sample_metadata.tsv\n# !wget https://data.qiime2.org/2018.8/tutorials/fmt/fmt-tutorial-demux-1-10p.qza\n# !wget https://data.qiime2.org/2018.8/tutorials/fmt/fmt-tutorial-demux-2-10p.qza\n\n# # The recipe\n\n# +\nimport pandas as pd\n\nfrom qiime2.metadata.metadata import Metadata\nfrom qiime2.metadata.metadata import CategoricalMetadataColumn\nfrom qiime2.sdk import Artifact\nfrom qiime2.sdk import PluginManager\nfrom qiime2.sdk import Result\n# -\n\npm = PluginManager()\ndemux_plugin = pm.plugins['demux']\n#demux_emp_single = demux_plugin.actions['emp_single']\ndemux_summarize = demux_plugin.actions['summarize']\npm.plugins\n\nprint(demux_summarize.description)\ndemux_summarize_signature = demux_summarize.signature\nprint(demux_summarize_signature.inputs)\nprint(demux_summarize_signature.parameters)\nprint(demux_summarize_signature.outputs)\n\n# +\nseqs1 = Result.load('fmt-tutorial-demux-1-10p.qza')\nsum_data1 = demux_summarize(seqs1)\n\nsum_data1.visualization\n\n# +\nseqs2 = Result.load('fmt-tutorial-demux-2-10p.qza')\nsum_data2 = demux_summarize(seqs2)\n\nprint(dir(sum_data2))\nprint(type(sum_data2.visualization))\nprint(dir(sum_data2.visualization))\nsum_data2.visualization\n# -\n\n#Quality control\ndada2_plugin = pm.plugins['dada2']\ndada2_denoise_single = dada2_plugin.actions['denoise_single']\nqual_control1 = dada2_denoise_single(demultiplexed_seqs=seqs1,\n                                    trunc_len=150, trim_left=13)\n\nqual_control2 = dada2_denoise_single(demultiplexed_seqs=seqs2,\n                                    trunc_len=150, trim_left=13)\n\nmetadata_plugin = pm.plugins['metadata']\nmetadata_tabulate = metadata_plugin.actions['tabulate']\nstats_meta1 = metadata_tabulate(input=qual_control1.denoising_stats.view(Metadata))\nstats_meta1.visualization\n\nstats_meta2 = 
metadata_tabulate(input=qual_control2.denoising_stats.view(Metadata))\nstats_meta2.visualization\n\n# +\nft_plugin = pm.plugins['feature-table']\nft_merge = ft_plugin.actions['merge']\nft_merge_seqs = ft_plugin.actions['merge_seqs']\nft_summarize = ft_plugin.actions['summarize']\nft_tab_seqs = ft_plugin.actions['tabulate_seqs']\n\ntable_merge = ft_merge(tables=[qual_control1.table, qual_control2.table])\nseqs_merge = ft_merge_seqs(data=[qual_control1.representative_sequences, qual_control2.representative_sequences])\n# -\n\nft_sum = ft_summarize(table=table_merge.merged_table)\nft_sum.visualization\n\ntab_seqs = ft_tab_seqs(data=seqs_merge.merged_data)\ntab_seqs.visualization\n\n\n"
  },
  {
    "path": "Chapter04/samples.tsv",
    "content": "id\tcross\tsex\tfunction\nAD0231-C\tcross-29-2\tF\tparent\nAD0232-C\tcross-29-2\tM\tparent\nAD0234-C\tcross-29-2\tF\tprogeny\nAD0235-C\tcross-29-2\tF\tprogeny\nAD0236-C\tcross-29-2\tF\tprogeny\nAD0237-C\tcross-29-2\tF\tprogeny\nAD0238-C\tcross-29-2\tF\tprogeny\nAD0239-C\tcross-29-2\tF\tprogeny\nAD0240-C\tcross-29-2\tM\tprogeny\nAD0241-C\tcross-29-2\tF\tprogeny\nAD0242-C\tcross-29-2\tM\tprogeny\nAD0243-C\tcross-29-2\tF\tprogeny\nAD0244-C\tcross-29-2\tF\tprogeny\nAD0245-C\tcross-29-2\tF\tprogeny\nAD0246-C\tcross-29-2\tF\tprogeny\nAD0247-C\tcross-29-2\tM\tprogeny\nAD0248-C\tcross-29-2\tF\tprogeny\nAD0249-C\tcross-29-2\tF\tprogeny\nAD0250-C\tcross-29-2\tF\tprogeny\nAD0251-C\tcross-29-2\tF\tprogeny\nAD0252-C\tcross-29-2\tF\tprogeny\nAD0253-C\tcross-29-2\tM\tprogeny\nAD0254-C\tcross-36-9\tF\tparent\nAD0255-C\tcross-36-9\tM\tparent\nAD0259-C\tcross-36-9\tM\tprogeny\nAD0260-C\tcross-36-9\tF\tprogeny\nAD0261-C\tcross-36-9\tF\tprogeny\nAD0262-C\tcross-36-9\tM\tprogeny\nAD0263-C\tcross-36-9\tM\tprogeny\nAD0265-C\tcross-36-9\tF\tprogeny\nAD0266-C\tcross-36-9\tM\tprogeny\nAD0267-C\tcross-36-9\tF\tprogeny\nAD0268-C\tcross-36-9\tM\tprogeny\nAD0269-C\tcross-36-9\tF\tprogeny\nAD0270-C\tcross-36-9\tM\tprogeny\nAD0271-C\tcross-36-9\tM\tprogeny\nAD0272-C\tcross-36-9\tF\tprogeny\nAD0273-C\tcross-36-9\tM\tprogeny\nAD0274-C\tcross-36-9\tF\tprogeny\nAD0275-C\tcross-36-9\tM\tprogeny\nAD0276-C\tcross-36-9\tF\tprogeny\nAD0305-C\tcross-42-4\tF\tparent\nAD0306-C\tcross-42-4\tM\tparent\nAD0309-C\tcross-42-4\tM\tprogeny\nAD0310-C\tcross-42-4\tM\tprogeny\nAD0311-C\tcross-42-4\tM\tprogeny\nAD0312-C\tcross-42-4\tM\tprogeny\nAD0313-C\tcross-42-4\tM\tprogeny\nAD0314-C\tcross-42-4\tM\tprogeny\nAD0315-C\tcross-42-4\tM\tprogeny\nAD0316-C\tcross-42-4\tF\tprogeny\nAD0317-C\tcross-42-4\tM\tprogeny\nAD0318-C\tcross-42-4\tM\tprogeny\nAD0319-C\tcross-42-4\tF\tprogeny\nAD0320-C\tcross-42-4\tF\tprogeny\nAD0322-C\tcross-42-4\tF\tprogeny\nAD0323-C\tcross-42-4\tF\tprogeny\nAD0347-C\tcross-46-9\tF\tp
arent\nAD0348-C\tcross-46-9\tM\tparent\nAD0351-C\tcross-46-9\tM\tprogeny\nAD0352-C\tcross-46-9\tF\tprogeny\nAD0353-C\tcross-46-9\tF\tprogeny\nAD0354-C\tcross-46-9\tF\tprogeny\nAD0355-C\tcross-46-9\tF\tprogeny\nAD0356-C\tcross-46-9\tM\tprogeny\nAD0357-C\tcross-46-9\tF\tprogeny\nAD0358-C\tcross-46-9\tF\tprogeny\nAD0359-C\tcross-46-9\tM\tprogeny\nAD0360-C\tcross-46-9\tF\tprogeny\nAD0361-C\tcross-46-9\tF\tprogeny\nAD0362-C\tcross-46-9\tM\tprogeny\nAD0363-C\tcross-46-9\tF\tprogeny\nAD0364-C\tcross-46-9\tM\tprogeny\nAD0365-C\tcross-46-9\tM\tprogeny\nAD0366-C\tcross-46-9\tF\tprogeny\nAD0367-C\tcross-46-9\tF\tprogeny\nAD0368-C\tcross-46-9\tF\tprogeny\nAD0369-C\tcross-46-9\tF\tprogeny\nAD0370-C\tcross-46-9\tF\tprogeny\nAD0438-C\tcross-36-9\tF\tprogeny\n"
  },
  {
    "path": "Chapter05/.gitignore",
    "content": "*.fasta\nag.db\n*gz\n*png"
  },
  {
    "path": "Chapter05/Annotations.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\n#pip install gffutils\nfrom collections import defaultdict\n\nimport gffutils\nimport sqlite3\n# -\n\n# !rm -f ag.db\n# !wget https://vectorbase.org/common/downloads/release-55/AgambiaePEST/gff/data/VectorBase-55_AgambiaePEST.gff -O gambiae.gff\n# !gzip -9 gambiae.gff\n\ntry:\n    db = gffutils.create_db('gambiae.gff.gz', 'ag.db')\nexcept sqlite3.OperationalError:\n    db = gffutils.FeatureDB('ag.db')\n\nprint(list(db.featuretypes()))\nfor feat_type in db.featuretypes():\n    print(feat_type, db.count_features_of_type(feat_type))\n\nseqids = set()\nfor e in db.all_features():\n    seqids.add(e.seqid)\nfor seqid in seqids:\n    print(seqid)\n\nnum_mRNAs = defaultdict(int)\nnum_exons = defaultdict(int)\nmax_exons = 0\nmax_span = 0\nfor seqid in seqids:\n    cnt = 0\n    for gene in db.region(seqid=seqid, featuretype='protein_coding_gene'):\n        cnt += 1\n        span = abs(gene.start - gene.end) # strand\n        if span > max_span:\n            max_span = span\n            max_span_gene = gene\n        my_mRNAs = list(db.children(gene, featuretype='mRNA'))\n        num_mRNAs[len(my_mRNAs)] += 1\n        if len(my_mRNAs) == 0:\n            exon_check = [gene]\n        else:\n            exon_check = my_mRNAs\n        for check in exon_check:\n            my_exons = list(db.children(check, featuretype='exon'))\n            num_exons[len(my_exons)] += 1\n            if len(my_exons) > max_exons:\n                max_exons = len(my_exons)\n                max_exons_gene = gene\n    print(f'seqid {seqid}, number of genes {cnt}')\nprint('Max number of exons: %s (%d)' % (max_exons_gene.id, max_exons))\nprint('Max span: %s (%d)' % (max_span_gene.id, 
max_span))\nprint(num_mRNAs)\nprint(num_exons)\n\n\n"
  },
  {
    "path": "Chapter05/Gene_Ontology.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n#use pip install as conda install requires a lot of downgrades at this stage\nimport pygraphviz as pgv\nfrom IPython.core.display import Image\n\n# ## The cell below comes from the Orthology notebook\n\n# +\nimport requests\n \nensembl_server = 'http://rest.ensembl.org'\n\ndef do_request(server, service, *args, **kwargs):\n    params = ''\n    for a in args:\n        if a is not None:\n            params += '/' + a\n    req = requests.get('%s/%s%s' % (server, service, params),\n                       params=kwargs,\n                       headers={'Content-Type': 'application/json'})\n \n    if not req.ok:\n        req.raise_for_status()\n    return req.json()\n\n\n# -\n\nlct_id = 'ENSG00000115850'\n\nrefs = do_request(ensembl_server, 'xrefs/id', lct_id, external_db='GO', all_levels='1')\nprint(len(refs))\nprint(refs[0].keys())\nfor ref in refs:\n    go_id = ref['primary_id']\n    details = do_request(ensembl_server, 'ontology/id', go_id)\n    print('%s %s %s' % (go_id,  details['namespace'], ref['description']))\n    print('%s\\n' % details['definition'])\n\ngo_id = 'GO:0000016'\nmy_data = do_request(ensembl_server, 'ontology/id', go_id)\nfor k, v in my_data.items():\n    if k == 'parents':\n        for parent in v:\n            print(parent)\n            parent_id = parent['accession']\n    else:\n        print('%s: %s' % (k, str(v)))\nprint()\nparent_data = do_request(ensembl_server, 'ontology/id', parent_id)\nprint(parent_id, len(parent_data['children']))\n\nrefs = do_request(ensembl_server, 'ontology/ancestors/chart', go_id)\nfor go, entry in refs.items():\n    print(go)\n    term = entry['term']\n    print('%s %s' % (term['name'], term['definition']))\n    
is_a = entry.get('is_a', [])\n    print('\\t is a: %s\\n' % ', '.join([x['accession'] for x in is_a]))\n\n\ndef get_upper(go_id):\n    parents = {}\n    node_data = {}\n    refs = do_request(ensembl_server, 'ontology/ancestors/chart', go_id)\n    for ref, entry in refs.items():\n        my_data = do_request(ensembl_server, 'ontology/id', ref)\n        node_data[ref] = {'name': entry['term']['name'], 'children': my_data['children']}\n        try:\n            parents[ref] = [x['accession'] for x in entry['is_a']]\n        except KeyError:\n            pass  # Top of hierarchy\n    return parents, node_data\n\n\nparents, node_data = get_upper(go_id)\n\ng = pgv.AGraph(directed=True)\nfor ofs, ofs_parents in parents.items():\n    ofs_text = '%s\\n(%s)' % (node_data[ofs]['name'].replace(', ', '\\n'), ofs)\n    for parent in ofs_parents:\n        parent_text = '%s\\n(%s)' % (node_data[parent]['name'].replace(', ', '\\n'), parent)\n        children = node_data[parent]['children']\n        if len(children) < 3:\n            for child in children:\n                if child['accession'] in node_data:\n                    continue\n                g.add_edge(parent_text, child['accession'])\n        else:\n            g.add_edge(parent_text, '...%d...' % (len(children) - 1))\n        g.add_edge(parent_text, ofs_text)\nprint(g)\ng.graph_attr['label']='Ontology tree for Lactase activity'\ng.node_attr['shape']='rectangle'\ng.layout(prog='dot')\ng.draw('graph.png')\nImage(\"graph.png\")\n\nprint(go_id)\nrefs = do_request(ensembl_server, 'ontology/descendants', go_id)\nfor go in refs:\n    print(go['accession'], go['name'], go['definition'])\n\n\n"
  },
  {
    "path": "Chapter05/Getting_Gene.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nimport gffutils\nimport gzip\nfrom Bio import Seq, SeqIO\n\n# ## Retrieving data\n\n# !rm -f ag.db\n# !wget https://vectorbase.org/common/downloads/release-55/AgambiaePEST/gff/data/VectorBase-55_AgambiaePEST.gff -O gambiae.gff\n# !gzip -9 gambiae.gff\n\ndb = gffutils.FeatureDB('ag.db')\n\n# # Getting a gene\n\ngene_id = 'AGAP004707'\n\ngene = db[gene_id]\n\nprint(gene)\nprint(gene.seqid, gene.strand)\n\nrecs = SeqIO.parse(gzip.open('gambiae.fa.gz', 'rt', encoding='utf-8'), 'fasta')\nfor rec in recs:\n    print(rec.description)\n    if rec.id == gene.seqid:\n        my_seq = rec.seq\n        break\n\n\n# +\ndef get_sequence(chrom_seq, CDSs, strand):\n    seq = Seq.Seq('')\n    for CDS in CDSs:\n        # #FRAME???\n        my_cds = Seq.Seq(str(chrom_seq[CDS.start - 1: CDS.end]))\n        seq += my_cds\n    return seq if strand == '+' else seq.reverse_complement()\n\n\n# +\nmRNAs = db.children(gene, featuretype='mRNA')\nfor mRNA in mRNAs:\n    print(mRNA.id)\n    if mRNA.id.endswith('RA'):\n        break\n\nCDSs = db.children(mRNA, featuretype='CDS', order_by='start')\ngene_seq = get_sequence(my_seq, CDSs, gene.strand)\n\nprint(len(gene_seq), gene_seq)\nprot = gene_seq.translate()\nprint(len(prot), prot)\n# -\n\n# # Reverse strand\n\nreverse_transcript_id = 'AGAP004708-RA'\n\n# +\nreverse_CDSs = db.children(reverse_transcript_id, featuretype='CDS', order_by='start')\nreverse_seq = get_sequence(my_seq, reverse_CDSs, '-')\n\nprint(len(reverse_seq), reverse_seq)\nreverse_prot = reverse_seq.translate()\nprint(len(reverse_prot), reverse_prot)\n# -\n\n\n"
  },
  {
    "path": "Chapter05/Low_Quality.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport gzip\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom Bio import SeqIO, SeqUtils\n# -\n\n# !rm -f atroparvus.fa.gz gambiae.fa.gz 2>/dev/null\n# !wget https://vectorbase.org/common/downloads/Current_Release/AgambiaePEST/fasta/data/VectorBase-67_AgambiaePEST_Genome.fasta -O gambiae.fa\n# !gzip -9 gambiae.fa\n# !wget https://vectorbase.org/common/downloads/Current_Release/AatroparvusEBRO/fasta/data/VectorBase-67_AatroparvusEBRO_Genome.fasta -O atroparvus.fa\n# !gzip -9 atroparvus.fa\n\ngambiae_name = 'gambiae.fa.gz'\natroparvus_name = 'atroparvus.fa.gz'\n\nrecs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')\nfor rec in recs:\n    print(rec.description)\n#Do not do this with atroparvus\n\nrecs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')\nchrom_Ns = {}\nchrom_sizes = {}\nfor rec in recs:\n    if rec.description.find('supercontig') > -1:\n        continue\n    print(rec.description, rec.id, rec)\n    chrom = rec.id.split('_')[1]\n    if chrom in ['UNKN']:#, 'Y_unplaced']:\n        continue\n    chrom_Ns[chrom] = []\n    on_N = False\n    curr_size = 0\n    for pos, nuc in enumerate(rec.seq):\n        if nuc in ['N', 'n']:\n            curr_size += 1\n            on_N = True\n        else:\n            if on_N:\n                chrom_Ns[chrom].append(curr_size)\n                curr_size = 0\n            on_N = False\n    if on_N:\n        chrom_Ns[chrom].append(curr_size)\n    chrom_sizes[chrom] = len(rec.seq)\n\nfor chrom, Ns in chrom_Ns.items():\n    size = chrom_sizes[chrom]\n    if len(Ns) > 0:\n        max_Ns = max(Ns)\n    else:\n        max_Ns = 'NA'\n    print(f'{chrom} ({size}): %Ns 
({round(100 * sum(Ns) / size, 1)}), num Ns: {len(Ns)}, max N: {max_Ns}')\n\n# ## Atroparvus super-contigs\n\nrecs = SeqIO.parse(gzip.open(atroparvus_name, 'rt', encoding='utf-8'), 'fasta')\nsizes = []\nsize_N = []\nfor rec in recs:\n    size = len(rec.seq)\n    sizes.append(size)\n    count_N = 0\n    for nuc in rec.seq:\n        if nuc in ['n', 'N']:\n            count_N += 1\n    size_N.append((size, count_N / size))\n\nprint(len(sizes), np.median(sizes), np.mean(sizes), max(sizes), min(sizes),\n      np.percentile(sizes, 10), np.percentile(sizes, 90))\n\nsmall_split = 4800\nlarge_split = 540000\nfig, axs = plt.subplots(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)\nxs, ys = zip(*[(x, 100 * y) for x, y in size_N if x <= small_split])\naxs[0, 0].plot(xs, ys, '.')\nxs, ys = zip(*[(x, 100 * y) for x, y in size_N if x > small_split and x <= large_split])\naxs[0, 1].plot(xs, ys, '.')\naxs[0, 1].set_xlim(small_split, large_split)\nxs, ys = zip(*[(x, 100 * y) for x, y in size_N if x > large_split])\naxs[0, 2].plot(xs, ys, '.')\naxs[0, 0].set_ylabel('Fraction of Ns', fontsize=12)\naxs[0, 1].set_xlabel('Contig size', fontsize=12)\nfig.suptitle('Fraction of Ns per contig size', fontsize=26)\nfig.savefig('frac.png')\n\n\n"
  },
  {
    "path": "Chapter05/Orthology.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport requests\n \nensembl_server = 'http://rest.ensembl.org'\n\ndef do_request(server, service, *args, **kwargs):\n    url_params = ''\n    for a in args:\n        if a is not None:\n            url_params += '/' + a\n    req = requests.get('%s/%s%s' % (server, service, url_params),\n                       params=kwargs,\n                       headers={'Content-Type': 'application/json'})\n \n    if not req.ok:\n        req.raise_for_status()\n    return req.json()\n\n\n# -\n\nanswer = do_request(ensembl_server, 'info/species')\nfor i, sp in enumerate(answer['species']):\n    print(i, sp['name'])\n\next_dbs = do_request(ensembl_server, 'info/external_dbs', 'homo_sapiens', filter='HGNC%')\nprint(ext_dbs)\n\nanswer = do_request(ensembl_server, 'lookup/symbol', 'homo_sapiens', 'LCT')\nprint(answer)\nlct_id = answer['id']\n\nlct_seq = do_request(ensembl_server, 'sequence/id', lct_id)\nprint(lct_seq)\n\nlct_xrefs = do_request(ensembl_server, 'xrefs/id', lct_id)\nfor xref in lct_xrefs:\n    print(xref['db_display_name'])\n    print(xref)\n\nrefs = do_request(ensembl_server, 'xrefs/id', lct_id, external_db='GO', all_levels='1')\nprint(lct_id, refs)\n\nhom_response = do_request(ensembl_server, 'homology/id', lct_id, type='orthologues', sequence='none')\n#print(hom_response['data'][0]['homologies'])\nhomologies = hom_response['data'][0]['homologies']\nfor homology in homologies:\n    print(homology['target']['species'])\n    if homology['target']['species'] != 'equus_caballus':\n        continue\n    print(homology)\n    print(homology['taxonomy_level'])\n    horse_id = homology['target']['id']\n\nhorse_req = do_request(ensembl_server, 'lookup/id', 
horse_id)\nprint(horse_req)\n\n# +\n#maybe synteny of MCM6 and LCT with caballus and gorilla\n"
  },
  {
    "path": "Chapter05/Reference_Genome.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.4\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nfrom IPython.core.display import Image\n\nfrom reportlab.lib import colors\nfrom reportlab.lib.units import cm\nfrom Bio import SeqIO\nfrom Bio.Graphics import BasicChromosome\n# -\n\n# !rm -f PlasmoDB-9.3_Pfalciparum3D7_Genome.fasta 2>/dev/null\n# vvvv 13.0\n# !wget http://plasmodb.org/common/downloads/release-13.0/Pfalciparum3D7/fasta/data/PlasmoDB-13.0_Pfalciparum3D7_Genome.fasta\n\ngenome_name = 'PlasmoDB-13.0_Pfalciparum3D7_Genome.fasta'\n\nrecs = SeqIO.parse(genome_name, 'fasta')\nchroms = {}\nfor rec in recs:\n    print(rec.description)\n\n# +\nfrom Bio import SeqUtils\n\nchrom_sizes = {}\nchrom_GC = {}\nrecs = SeqIO.parse(genome_name, 'fasta')\nblock_size = 50000\nmin_GC = 100.0\nmax_GC = 0.0\nfor rec in recs:\n    if rec.description.find('SO=chromosome') == -1:\n        continue\n    chrom = int(rec.description.split('_')[1])\n    chrom_GC[chrom] = []\n    size = len(rec.seq)\n    chrom_sizes[chrom] = size\n    num_blocks = size // block_size + 1\n    for block in range(num_blocks):\n        start = block_size * block\n        if block == num_blocks - 1:\n            end = size\n        else:\n            end = block_size + start + 1\n        block_seq = rec.seq[start:end]\n        block_GC = SeqUtils.GC(block_seq)\n        if block_GC < min_GC:\n            min_GC = block_GC\n        if block_GC > max_GC:\n            max_GC = block_GC\n        chrom_GC[chrom].append(block_GC)\nprint(min_GC, max_GC)\n\n# +\nchroms = list(chrom_sizes.keys())\nchroms.sort()\n\nbiggest_chrom = max(chrom_sizes.values())\n\nmy_genome = BasicChromosome.Organism(output_format=\"png\")\n\nmy_genome.page_size = (29.7*cm, 21*cm) # check\ntelomere_length = 10\n\nbottom_GC = 
17.5\ntop_GC = 22.0\nfor chrom in chroms:\n    chrom_size = chrom_sizes[chrom]\n    chrom_representation = BasicChromosome.Chromosome('Cr %d' % chrom)\n    chrom_representation.scale_num = biggest_chrom\n\n    tel = BasicChromosome.TelomereSegment()\n    tel.scale = telomere_length\n    chrom_representation.add(tel)\n\n    num_blocks = len(chrom_GC[chrom])\n    for block, gc in enumerate(chrom_GC[chrom]):\n        my_GC = chrom_GC[chrom][block]\n        body = BasicChromosome.ChromosomeSegment()\n        if my_GC > top_GC:\n            body.fill_color = colors.Color(1, 0, 0)\n        elif my_GC < bottom_GC:\n            body.fill_color = colors.Color(1, 1, 0)\n        else:\n            my_color = (my_GC - bottom_GC) / (top_GC - bottom_GC)\n            body.fill_color = colors.Color(my_color, my_color, 1)\n        if block < num_blocks - 1:\n            body.scale = block_size\n        else:\n            body.scale = chrom_size % block_size\n        chrom_representation.add(body)\n\n    tel = BasicChromosome.TelomereSegment(inverted=True)\n    tel.scale = telomere_length\n    chrom_representation.add(tel)\n\n    my_genome.add(chrom_representation)\n\nmy_genome.draw(\"falciparum.png\", \"Plasmodium falciparum\")\nImage(\"falciparum.png\")\n# -\n\n\n"
  },
  {
    "path": "Chapter06/.gitignore",
    "content": "*.log\n*.ped\n*.map\n*.bed\n*.bim\n*.fam\nexclude*.txt\nrelationships_w_pops_041510.txt\n*.in\n*.out"
  },
  {
    "path": "Chapter06/Admixture.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.3\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# +\nfrom collections import defaultdict\nimport os\n\nimport matplotlib.pyplot as plt\n\nfrom genomics.popgen.admix import cluster, plot\n\n# %matplotlib notebook\n# -\n\nk_range = range(2, 10)  # 2..9\n\n# ### The next cell is very slow. Example outputs are provided (so you can avoid running it)\n\n# +\n#for k in k_range:\n#    os.system('admixture --cv=10 hapmap10_auto_noofs_ld.bed %d > admix.%d' % (k, k))\n# -\n\n# ## Individual order\n\nf = open('hapmap10_auto_noofs_ld.fam')\nind_order = []\nfor l in f:\n    toks = l.rstrip().replace(' ', '\\t').split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    ind_order.append((fam_id, ind_id))\nf.close()\n\n# ## CV-plot\n\nCVs = []\nfor k in k_range:\n    f = open('admix.%d' % k)\n    for l in f:\n        if l.find('CV error') > -1:\n            CVs.append(float(l.rstrip().split(' ')[-1]))\n            break\n    f.close()\nfig = plt.figure(figsize=(16, 9))\nax = fig.add_subplot(111)\nax.plot(k_range, CVs)\nax.set_title('Cross-Validation error')\nax.set_xlabel('K')\n\n# ## Load meta-data\n\nf = open('relationships_w_pops_121708.txt')\npop_ind = defaultdict(list)\nf.readline()  # header\nfor l in f:\n    toks = l.rstrip().split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    if (fam_id, ind_id) not in ind_order:\n        continue\n    mom = toks[2]\n    dad = toks[3]\n    if mom != '0' or dad != '0':\n        continue\n    pop = toks[-1]\n    pop_ind[pop].append((fam_id, ind_id))\n#ind_pop[('2469', 'NA20281')] = ind_pop[('2805', 'NA20281')]\nf.close()\n\n\ndef load_Q(fname, ind_order):\n    ind_comps = {}\n    f = open(fname)\n    for i, l in enumerate(f):\n        comps = [float(x) for x in l.rstrip().split(' ')]\n 
       ind_comps[ind_order[i]] = comps\n    f.close()\n    return ind_comps\n\n\ncomps = {}\nfor k in k_range:\n    comps[k] = load_Q('hapmap10_auto_noofs_ld.%d.Q' % k, ind_order)\n\nordering = {}\nfor k in k_range:\n    ordering[k] = cluster(comps[k], pop_ind)\n\nfig = plt.figure(figsize=(9, 9))\nplot.single(comps[4], ordering[4], fig)\nNone\n\nfig = plt.figure(figsize=(16, 9))\nplot.stacked(comps, ordering[7], fig)\n\n# ## Q files?\n\n# ## Log-likelihood\n\n\n"
  },
  {
    "path": "Chapter06/Data_Formats.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# ## Data download\n\n# +\n# !wget https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3/plink_format/hapmap3_r3_b36_fwd.consensus.qc.poly.map.gz\n# !wget https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3/plink_format/hapmap3_r3_b36_fwd.consensus.qc.poly.ped.gz\n\n# !wget https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3/relationships_w_pops_041510.txt\n# -\n\n# !gzip -d hapmap3_r3_b36_fwd.consensus.qc.poly.map.gz\n# !gzip -d hapmap3_r3_b36_fwd.consensus.qc.poly.ped.gz\n\n# # Preparation\n\nimport os\nfrom collections import defaultdict\n\n# ## Loading HapMap meta-data\n\nf = open('relationships_w_pops_041510.txt')\npop_ind = defaultdict(list)\nf.readline()  # header\noffspring = []\nfor l in f:\n    toks = l.rstrip().split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    mom = toks[2]\n    dad = toks[3]\n    if mom != '0' or dad != '0':\n        offspring.append((fam_id, ind_id))\n    pop = toks[-1]\n    pop_ind[pop].append((fam_id, ind_id))\nf.close()\n\n# ## Sub-sampling\n\nos.system('plink2 --pedmap hapmap3_r3_b36_fwd.consensus.qc.poly --out hapmap10 --thin 0.1 --geno 0.1 --export ped')\nos.system('plink2 --pedmap hapmap3_r3_b36_fwd.consensus.qc.poly --out hapmap1 --thin 0.01 --geno 0.1 --export ped')\n\n\n# ## Getting only autosomal data\n\ndef get_non_auto_SNPs(map_file, exclude_file):\n    f = open(map_file)\n    w = open(exclude_file, 'w')\n    for l in f:\n        toks = l.rstrip().split('\\t')\n        try:\n            chrom = int(toks[0])\n        except ValueError:\n            rs = toks[1]\n            w.write('%s\\n' % rs)\n    w.close()\n\n\nget_non_auto_SNPs('hapmap10.map', 
'exclude10.txt')\nget_non_auto_SNPs('hapmap1.map', 'exclude1.txt')\n\n# !plink2 --pedmap hapmap10 --out hapmap10_auto --exclude exclude10.txt --export ped\n# !plink2 --pedmap hapmap1 --out hapmap1_auto --exclude exclude1.txt --export ped\n\n\n# ## Removing offspring\n\n# !plink2 --pedmap hapmap10_auto --filter-founders --out hapmap10_auto_noofs --export ped\n\n# ## LD-prunning\n\n# !plink2 --pedmap hapmap10_auto_noofs --indep-pairwise 50 10 0.1 --out keep --export ped\n# !plink2 --pedmap hapmap10_auto_noofs --extract keep.prune.in --out hapmap10_auto_noofs_ld --export ped\n\n# ## Different encoding\n\n# !plink2 --pedmap hapmap10_auto_noofs_ld --out hapmap10_auto_noofs_ld_12 --export ped 12\n# !plink2 --make-bed --pedmap hapmap10_auto_noofs_ld --out hapmap10_auto_noofs_ld\n\n# ## Single chromosome\n\n# !plink2 --pedmap hapmap10_auto_noofs --chr 2 --out hapmap10_auto_noofs_2 --export ped\n"
  },
  {
    "path": "Chapter06/Exploratory_Analysis.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# ## Loading HapMap data\n\n# +\nimport numpy as np\nimport xarray as xr\nimport sgkit as sg\nfrom sgkit.io import plink\n\ndata = plink.read_plink(path='hapmap10_auto_noofs_ld', fam_sep='\\t')\n# -\n\ndata\n\nprint(data.dims)\n\nvariant_stats = sg.variant_stats(data)\nvariant_stats\n\nvariant_stats.variant_call_rate.to_series().describe()\n\nprint(type(variant_stats.variant_call_rate.to_series()))\n\nsample_stats = sg.sample_stats(data)\nsample_stats\n\nsample_stats.sample_call_rate.to_series().hist()\n\ndata['sample_cohort'] = xr.DataArray(\n    np.zeros(data.dims['samples'], dtype=np.int64),\n    dims='samples')\n# data[\"sample_cohort\"] = xr.DataArray(np.repeat([0, 1], data.dims[\"samples\"] // 2), dims=\"samples\")\n\nsg.cohort_allele_frequencies(data)['cohort_allele_frequency'][:,:,0].values\n\nsg.cohort_allele_frequencies(data)['cohort_allele_frequency'][:,:,0].to_series().hist()\n\n\n\n# # maf\n\ncohort_allele_frequency = sg.cohort_allele_frequencies(data)['cohort_allele_frequency'].values\n\nmin_freqs = map(\n    lambda x: x if x < 0.5 else 1 - x,\n    filter(\n        lambda x: x not in [0, 1],\n        cohort_allele_frequency[:, 0, 0]))\n\n\n"
  },
  {
    "path": "Chapter06/PCA.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.3\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# + jupyter={\"outputs_hidden\": false}\nimport os\n\nfrom genomics.popgen.plink.convert import to_eigen\nfrom genomics.popgen.pca import plot, smart\n# %matplotlib inline\n# -\n\n# ## Meta-data load\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('relationships_w_pops_121708.txt')\nind_pop = {}\nf.readline()  # header\nfor l in f:\n    toks = l.rstrip().split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    pop = toks[-1]\n    ind_pop['/'.join([fam_id, ind_id])] = pop\nf.close()\nind_pop['2469/NA20281'] = ind_pop['2805/NA20281']\n# -\n\n# ## Requires plink from data preparation\n\n# + jupyter={\"outputs_hidden\": false}\nto_eigen('hapmap10_auto_noofs_ld_12', 'hapmap10_auto_noofs_ld_12')\n# -\n\n# ## Running smartpca\n\n# + jupyter={\"outputs_hidden\": false}\nctrl = smart.SmartPCAController('hapmap10_auto_noofs_ld_12')\nctrl.run()\n\n# + jupyter={\"outputs_hidden\": false}\nwei, wei_perc, ind_comp = smart.parse_evec('hapmap10_auto_noofs_ld_12.evec', 'hapmap10_auto_noofs_ld_12.eval')\n\n# + jupyter={\"outputs_hidden\": false}\nplot.render_pca(ind_comp, 1, 2, cluster=ind_pop)\n#put weights\n\n# + jupyter={\"outputs_hidden\": false}\nplot.render_pca_eight(ind_comp, cluster=ind_pop)\n\n# + jupyter={\"outputs_hidden\": false}\nmarkers = { 'CHB': '*', 'CHD': '*', 'JPT': '*', 'GIH': '*',\n           'CEU': 'v', 'TSI': 'v', 'MEX': 'v',\n           'ASW': 'o', 'LWK': 'o', 'YRI': 'o', 'MKK': 'o'\n           }\n\n# -\n\n# ## With scikit-learn\n\n# + jupyter={\"outputs_hidden\": false}\nfrom sklearn.decomposition import PCA\nimport numpy as np\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('hapmap10_auto_noofs_ld_12.ped')\nninds = 0\nind_order = []\nfor 
line in f:\n    ninds += 1\n    toks = line[:100].replace(' ', '\\t').split('\\t') #  for speed\n    fam_id = toks[0]\n    ind_id = toks[1]\n    ind_order.append('%s/%s' % (fam_id, ind_id))\nnsnps = (len(line.replace(' ', '\\t').split('\\t')) - 6) // 2\nprint (nsnps)\nf.close()\n\n# + jupyter={\"outputs_hidden\": false}\npca_array = np.empty((ninds, nsnps), dtype=int)\nprint(pca_array.shape)\nf = open('hapmap10_auto_noofs_ld_12.ped')\nfor ind, line in enumerate(f):\n    snps = line.replace(' ', '\\t').split('\\t')[6:]\n    for pos in range(len(snps) // 2):\n        a1 = int(snps[2 * pos])\n        a2 = int(snps[2 * pos + 1])\n        my_code = a1 + a2 - 2\n        pca_array[ind, pos] = my_code\nf.close()\n#slow\n\n# + jupyter={\"outputs_hidden\": false}\nmy_pca = PCA(n_components=8)\nmy_pca.fit(pca_array)\ntrans = my_pca.transform(pca_array)\n#Memory required\n\n# + jupyter={\"outputs_hidden\": false}\nsc_ind_comp = {}\nfor i, ind_pca in enumerate(trans):\n    sc_ind_comp[ind_order[i]] = ind_pca\nplot.render_pca_eight(sc_ind_comp, cluster=ind_pop)\n\n# + jupyter={\"outputs_hidden\": false}\n\n"
  },
  {
    "path": "Chapter06/Pop_Stats.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# ## Loading HapMap meta-data\n\n# +\nfrom collections import defaultdict\nfrom pprint import pprint\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport xarray as xr\nimport sgkit as sg\nfrom sgkit.io import plink\n\ndata = plink.read_plink(path='hapmap10_auto_noofs_ld', fam_sep='\\t')\n# -\n\ndata\n\nf = open('relationships_w_pops_041510.txt')\npop_ind = defaultdict(list)\nf.readline()  # header\nfor line in f:\n    toks = line.rstrip().split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    pop = toks[-1]\n    pop_ind[pop].append((fam_id, ind_id))\n\npops = list(pop_ind.keys())\n\n\ndef assign_cohort(pops, pop_ind, sample_family_id, sample_id):\n    cohort = []\n    for fid, sid in zip(sample_family_id, sample_id):\n        processed = False\n        for i, pop in enumerate(pops):\n            if (fid, sid) in pop_ind[pop]:\n                processed = True\n                cohort.append(i)\n                break\n        if not processed:\n            raise Exception(f'Not processed {fid}, {sid}')\n    return cohort\n\n\ncohort = assign_cohort(pops, pop_ind, data.sample_family_id.values, data.sample_id.values)\n\ndata['sample_cohort'] = xr.DataArray(\n    cohort, dims='samples')\n\n# # monomorphic positions per pop\n\ncohort_allele_frequency = sg.cohort_allele_frequencies(data)['cohort_allele_frequency'].values\n\nmonom = {}\nfor i, pop in enumerate(pops):\n    monom[pop] = len(list(filter(lambda x: x, np.isin(cohort_allele_frequency[:, i, 0], [0, 1]))))\npprint(monom)\n\n# # MAF\n\nmafs = {}\nfor i, pop in enumerate(pops):\n    min_freqs = map(\n        lambda x: x if x < 0.5 else 1 - x,\n        
filter(\n            lambda x: x not in [0, 1],\n            cohort_allele_frequency[:, i, 0]))\n    mafs[pop] = pd.Series(min_freqs)\n\nmaf_plot, maf_ax = plt.subplots(nrows=2, sharey=True)\nmafs['YRI'].hist(ax=maf_ax[0], bins=50)\nmaf_ax[0].set_title('*YRI*')\nmafs['JPT'].hist(ax=maf_ax[1], bins=50)\nmaf_ax[1].set_title('*JPT*')\nmaf_ax[1].set_xlabel('MAF')\n\n# # Fst\n\nfst = sg.Fst(data)\n\nfst = fst.assign_coords({\"cohorts_0\": pops, \"cohorts_1\": pops})\n\nremove_nan = lambda data: filter(lambda x: not np.isnan(x), data)\nceu_chb = pd.Series(remove_nan(fst.stat_Fst.sel(cohorts_0='CEU', cohorts_1='CHB').values))\nchb_chd = pd.Series(remove_nan(fst.stat_Fst.sel(cohorts_0='CHB', cohorts_1='CHD').values))\n\nceu_chb.describe()\n\nchb_chd.describe()\n\nmean_fst = {}\nfor i, pop_i in enumerate(pops):\n    for j, pop_j in enumerate(pops):\n        if j <= i:\n            continue\n        pair_fst = pd.Series(remove_nan(fst.stat_Fst.sel(cohorts_0=pop_i, cohorts_1=pop_j).values))\n        mean = pair_fst.mean()\n        mean_fst[(pop_i, pop_j)] = mean\n\nmin_pair = min(mean_fst.values())\nmax_pair = max(mean_fst.values())\n\nsns.set_style(\"white\")\nnum_pops = len(pops)\narr = np.ones((num_pops - 1, num_pops - 1, 3), dtype=float)\nfig = plt.figure(figsize=(16, 9))\nax = fig.add_subplot(111)\nfor row in range(num_pops - 1):\n    pop_i = pops[row]\n    for col in range(row + 1, num_pops):\n        pop_j = pops[col]\n        val = mean_fst[(pop_i, pop_j)]\n        norm_val = (val - min_pair) / (max_pair - min_pair)\n        ax.text(col - 1, row, '%.3f' % val, ha='center')\n        if norm_val == 0.0:\n            arr[row, col - 1, 0] = 1\n            arr[row, col - 1, 1] = 1\n            arr[row, col - 1, 2] = 0\n        elif norm_val == 1.0:\n            arr[row, col - 1, 0] = 1\n            arr[row, col - 1, 1] = 0\n            arr[row, col - 1, 2] = 1\n        else:\n            arr[row, col - 1, 0] = 1 - norm_val\n            arr[row, col - 1, 1] = 1\n            
arr[row, col - 1, 2] = 1\nax.imshow(arr, interpolation='none')\nax.set_title('Multilocus Pairwise FST')\nax.set_xticks(range(num_pops - 1))\nax.set_xticklabels(pops[1:])\nax.set_yticks(range(num_pops - 1))\nax.set_yticklabels(pops[:-1])\n"
  },
  {
    "path": "Chapter06/Sgkit.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nimport os\nfrom collections import defaultdict\n\n# ## Loading HapMap data\n\n# +\nimport numpy as np\nfrom sgkit.io import plink\n\ndata = plink.read_plink(path='hapmap10_auto_noofs_ld', fam_sep='\\t')\n# -\n\ndata\n\nprint(data.dims)\n\nprint(len(data.sample_id.values))\nprint(data.sample_id.values)\nprint(data.sample_family_id.values)\nprint(data.sample_sex.values)\n\nprint(data.contigs)\n\nprint(len(data.variant_contig.values))\nprint(data.variant_contig.values)\nprint(data.variant_position.values)\nprint(data.variant_allele.values)\nprint(data.variant_id.values)\n\ndata.call_genotype\n\ncall_genotype = data.call_genotype.values\nprint(call_genotype.shape)\nfirst_individual = call_genotype[:,0,:]\nfirst_variant = call_genotype[0,:,:]\nfirst_variant_of_first_individual = call_genotype[0,0,:]\nprint(first_variant_of_first_individual)\nprint(data.sample_family_id.values[0], data.sample_id.values[0])\nprint(data.variant_allele.values[0])\n\n\n"
  },
  {
    "path": "Chapter07/.gitignore",
    "content": "*fasta\ntrim.fasta.reduced\n*nex\nbp_rx"
  },
  {
    "path": "Chapter07/Alignment.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport os\n\nimport dendropy\n# -\n\n# ## Genome alignment\n\nfrom Bio.Align.Applications import MafftCommandline\nmafft_cline = MafftCommandline(input='sample.fasta', ep=0.123, reorder=True, maxiterate=1000, localpair=True)\nprint(mafft_cline)\nstdout, stderr = mafft_cline()\nwith open('align.fasta', 'w') as w:\n    w.write(stdout)\n\nos.system('trimal -automated1 -in align.fasta -out trim.fasta -fasta')\n\n\n# ## Protein alignment\n\n# +\nfrom Bio.Align.Applications import MuscleCommandline\n\nmy_genes = ['NP', 'L', 'VP35', 'VP40']\n\nfor gene in my_genes:\n    muscle_cline = MuscleCommandline(input='%s_P.fasta' % gene)\n    print(muscle_cline)\n    stdout, stderr = muscle_cline()\n    with open('%s_P_align.fasta' % gene, 'w') as w:\n        w.write(stdout)\n\n# +\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n# XXX vvv\n# from Bio.Alphabet import generic_protein\n\nfor gene in my_genes:\n    gene_seqs = {}\n    unal_gene = SeqIO.parse('%s.fasta' % gene, 'fasta')\n    for rec in unal_gene:\n        gene_seqs[rec.id] = rec.seq\n\n    al_prot = SeqIO.parse('%s_P_align.fasta' % gene, 'fasta')\n    al_genes = []\n    for protein in al_prot:\n        my_id = protein.id\n        seq = ''\n        pos = 0\n        for c in protein.seq:\n            if c == '-':\n                seq += '---'\n            else:\n                seq += str(gene_seqs[my_id][pos:pos + 3])\n                pos += 3\n        al_genes.append(SeqRecord(Seq(seq), id=my_id))\n\n\n    SeqIO.write(al_genes, '%s_align.fasta' % gene, 'fasta')\n# -\n\n\n"
  },
  {
    "path": "Chapter07/Comparison.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.6\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\n\nimport dendropy\nfrom dendropy.calculate import popgenstat\n# -\n\n# ## Genes\n\n# +\ngenes_species = OrderedDict()\nmy_species = ['RESTV', 'SUDV']\nmy_genes = ['NP', 'L', 'VP35', 'VP40']\n\n\nfor name in my_genes:\n    gene_name = name.split('.')[0]\n    char_mat = dendropy.DnaCharacterMatrix.get_from_path('%s_align.fasta' % name, 'fasta')\n    genes_species[gene_name] = {}\n    \n    for species in my_species:\n        genes_species[gene_name][species] = dendropy.DnaCharacterMatrix()\n    for taxon, char_map in char_mat.items():\n        species = taxon.label.split('_')[0]\n        if species in my_species:\n            genes_species[gene_name][species].taxon_namespace.add_taxon(taxon)\n            genes_species[gene_name][species][taxon] = char_map\n# -\n\nsummary = np.ndarray(shape=(len(genes_species), 4 * len(my_species)))\nstats = ['seg_sites', 'nuc_div', 'taj_d', 'wat_theta']\nfor row, (gene, species_data) in enumerate(genes_species.items()):\n    for col_base, species in enumerate(my_species):\n        summary[row, col_base * 4] = popgenstat.num_segregating_sites(species_data[species])\n        summary[row, col_base * 4 + 1] = popgenstat.nucleotide_diversity(species_data[species])\n        summary[row, col_base * 4 + 2] = popgenstat.tajimas_d(species_data[species])\n        summary[row, col_base * 4 + 3] = popgenstat.wattersons_theta(species_data[species])\ncolumns = []\nfor species in my_species:\n    columns.extend(['%s (%s)' % (stat, species) for stat in stats])\ndf = pd.DataFrame(summary, index=genes_species.keys(), columns=columns)\ndf # vs 
print(df)\n\n\n# ## Genomes\n\ndef do_basic_popgen(seqs):\n    num_seg_sites = popgenstat.num_segregating_sites(seqs)\n    avg_pair = popgenstat.average_number_of_pairwise_differences(seqs)\n    nuc_div = popgenstat.nucleotide_diversity(seqs)\n    print('Segregating sites: %d, Avg pairwise diffs: %.2f, Nucleotide diversity %.6f' % (num_seg_sites, avg_pair, nuc_div))\n    print(\"Watterson's theta: %s\" % popgenstat.wattersons_theta(seqs))\n    print(\"Tajima's D: %s\" % popgenstat.tajimas_d(seqs))\n\n\n#XXX change\nebov_seqs = dendropy.DnaCharacterMatrix.get_from_path(\n    'trim.fasta', schema='fasta', data_type='dna')\nsl_2014 = []\ndrc_2007 = []\nebov2007_set = dendropy.DnaCharacterMatrix()\nebov2014_set = dendropy.DnaCharacterMatrix()\nfor taxon, char_map in ebov_seqs.items():\n    print(taxon.label)\n    if taxon.label.startswith('EBOV_2014') and len(sl_2014) < 8:\n        sl_2014.append(char_map)\n        ebov2014_set.taxon_namespace.add_taxon(taxon)\n        ebov2014_set[taxon] = char_map\n    elif taxon.label.startswith('EBOV_2007'):\n        drc_2007.append(char_map)\n        ebov2007_set.taxon_namespace.add_taxon(taxon)\n        ebov2007_set[taxon] = char_map\n        #ebov2007_set.extend_map({taxon: char_map})\ndel ebov_seqs\n\n# +\nprint('2007 outbreak:')\nprint('Number of individuals: %s' % len(ebov2007_set.taxon_namespace))\ndo_basic_popgen(ebov2007_set)\n\nprint('\\n2014 outbreak:')\nprint('Number of individuals: %s' % len(ebov2014_set.taxon_namespace))\ndo_basic_popgen(ebov2014_set)\n# -\n\nprint(len(sl_2014))\nprint(len(drc_2007))\n\npair_stats = popgenstat.PopulationPairSummaryStatistics(sl_2014, drc_2007)\n\nprint('Average number of pairwise differences irrespective of population: %.2f' %\n      pair_stats.average_number_of_pairwise_differences)\nprint('Average number of pairwise differences between populations: %.2f' %\n      pair_stats.average_number_of_pairwise_differences_between)\nprint('Average number of pairwise differences within 
populations: %.2f' %\n      pair_stats.average_number_of_pairwise_differences_within)\nprint('Average number of net pairwise differences : %.2f' %\n      pair_stats.average_number_of_pairwise_differences_net)\nprint('Number of segregating sites: %d' %\n      pair_stats.num_segregating_sites)\nprint(\"Watterson's theta: %.2f\" %\n      pair_stats.wattersons_theta)\nprint(\"Wakeley's Psi: %.3f\" % pair_stats.wakeleys_psi)\nprint(\"Tajima's D: %.2f\" % pair_stats.tajimas_d)\n\n\n"
  },
  {
    "path": "Chapter07/Exploration.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.6\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nimport dendropy\nfrom dendropy.interop import genbank\n\n\n# ## Getting the data\n\n# +\ndef get_ebov_2014_sources():\n    #EBOV_2014\n    #yield 'EBOV_2014', genbank.GenBankDna(id_range=(233036, 233118), prefix='KM')\n    yield 'EBOV_2014', genbank.GenBankDna(id_range=(34549, 34563), prefix='KM0')\n    \ndef get_other_ebov_sources():\n    #EBOV other\n    yield 'EBOV_1976', genbank.GenBankDna(ids=['AF272001', 'KC242801'])\n    yield 'EBOV_1995', genbank.GenBankDna(ids=['KC242796', 'KC242799'])\n    yield 'EBOV_2007', genbank.GenBankDna(id_range=(84, 90), prefix='KC2427')\n    \ndef get_other_ebolavirus_sources():\n    #BDBV\n    yield 'BDBV', genbank.GenBankDna(id_range=(3, 6), prefix='KC54539')\n    yield 'BDBV', genbank.GenBankDna(ids=['FJ217161'])\n\n    #RESTV\n    yield 'RESTV', genbank.GenBankDna(ids=['AB050936', 'JX477165', 'JX477166', 'FJ621583', 'FJ621584', 'FJ621585']) \n\n    #SUDV\n    yield 'SUDV', genbank.GenBankDna(ids=['KC242783', 'AY729654', 'EU338380',\n                                          'JN638998', 'FJ968794', 'KC589025', 'JN638998'])\n    #yield 'SUDV', genbank.GenBankDna(id_range=(89, 92), prefix='KC5453')    \n\n    #TAFV\n    yield 'TAFV', genbank.GenBankDna(ids=['FJ217162'])\n\n\n# +\nother = open('other.fasta', 'w')\nsampled = open('sample.fasta', 'w')\n\nfor species, recs in get_other_ebolavirus_sources():\n    tn = dendropy.TaxonNamespace()\n    char_mat = recs.generate_char_matrix(taxon_namespace=tn,\n        gb_to_taxon_fn=lambda gb: tn.require_taxon(label='%s_%s' % (species, gb.accession)))\n    char_mat.write_to_stream(other, 'fasta')\n    char_mat.write_to_stream(sampled, 'fasta')\nother.close()\nebov_2014 = 
open('ebov_2014.fasta', 'w')\nebov = open('ebov.fasta', 'w')\nfor species, recs in get_ebov_2014_sources():\n    tn = dendropy.TaxonNamespace()\n    char_mat = recs.generate_char_matrix(taxon_namespace=tn,\n        gb_to_taxon_fn=lambda gb: tn.require_taxon(label='EBOV_2014_%s' % gb.accession))\n    char_mat.write_to_stream(ebov_2014, 'fasta')\n    char_mat.write_to_stream(sampled, 'fasta')\n    char_mat.write_to_stream(ebov, 'fasta')\nebov_2014.close()\n\nebov_2007 = open('ebov_2007.fasta', 'w')\nfor species, recs in get_other_ebov_sources():\n    tn = dendropy.TaxonNamespace()\n    char_mat = recs.generate_char_matrix(taxon_namespace=tn,\n        gb_to_taxon_fn=lambda gb: tn.require_taxon(label='%s_%s' % (species, gb.accession)))\n    char_mat.write_to_stream(ebov, 'fasta')\n    char_mat.write_to_stream(sampled, 'fasta')\n    if species == 'EBOV_2007':\n        char_mat.write_to_stream(ebov_2007, 'fasta')\n\nebov.close()\nebov_2007.close()\nsampled.close()\n# -\n\n# ## Genes\n\n# +\nmy_genes = ['NP', 'L', 'VP35', 'VP40']\n\ndef dump_genes(species, recs, g_hdls, p_hdls):\n    for rec in recs:\n\n        for feature in rec.feature_table:\n                    if feature.key == 'CDS':\n                        gene_name = None\n                        for qual in feature.qualifiers:\n                            if qual.name == 'gene':\n                                if qual.value in my_genes:\n                                    gene_name = qual.value\n                            elif qual.name == 'translation':\n                                protein_translation = qual.value\n                        if gene_name is not None:\n                            locs = feature.location.split('.')\n                            start, end = int(locs[0]), int(locs[-1])\n                            g_hdls[gene_name].write('>%s_%s\\n' % (species, rec.accession))\n                            p_hdls[gene_name].write('>%s_%s\\n' % (species, rec.accession))\n                           
 g_hdls[gene_name].write('%s\\n' % rec.sequence_text[start - 1 : end])\n                            p_hdls[gene_name].write('%s\\n' % protein_translation)\n\ng_hdls = {}\np_hdls = {}\nfor gene in my_genes:\n    g_hdls[gene] = open('%s.fasta' % gene, 'w')\n    p_hdls[gene] = open('%s_P.fasta' % gene, 'w')\nfor species, recs in get_other_ebolavirus_sources():\n    if species in ['RESTV', 'SUDV']:\n        dump_genes(species, recs, g_hdls, p_hdls)\nfor gene in my_genes:\n    g_hdls[gene].close()\n    p_hdls[gene].close()\n\n\n# -\n\n# ## Genome exploration\n\ndef describe_seqs(seqs):\n    print('Number of sequences: %d' % len(seqs.taxon_namespace))\n    print('First 10 taxon sets: %s' % ' '.join([taxon.label for taxon in seqs.taxon_namespace[:10]]))\n    lens = []\n    for tax, seq in seqs.items():\n        lens.append(len([x for x in seq.symbols_as_list() if x != '-']))\n    print('Genome length: min %d, mean %.1f, max %d' % (min(lens), sum(lens) / len(lens), max(lens)))\n\n\nebov_seqs = dendropy.DnaCharacterMatrix.get_from_path('ebov.fasta', schema='fasta', data_type='dna')\nprint('EBOV')\ndescribe_seqs(ebov_seqs)\ndel ebov_seqs\n\nprint('ebolavirus sequences')\nebolav_seqs = dendropy.DnaCharacterMatrix.get_from_path('other.fasta', schema='fasta', data_type='dna')\ndescribe_seqs(ebolav_seqs)\nfrom collections import defaultdict\nspecies = defaultdict(int)\nfor taxon in ebolav_seqs.taxon_namespace:\n    toks = taxon.label.split('_')\n    my_species = toks[0]\n    if my_species == 'EBOV':\n        ident = '%s (%s)' % (my_species, toks[1])\n    else:\n        ident = my_species\n    species[ident] += 1\nfor my_species, cnt in species.items():\n    print(\"%20s: %d\" % (my_species, cnt))\ndel ebolav_seqs\n\n# ## Genes\n\n# +\nimport os\ngene_length = {}\nmy_genes = ['NP', 'L', 'VP35', 'VP40']\n\nfor name in my_genes:\n    gene_name = name.split('.')[0]\n    seqs = dendropy.DnaCharacterMatrix.get_from_path('%s.fasta' % name, schema='fasta', data_type='dna')\n    
gene_length[gene_name] = []\n    for tax, seq in seqs.items():\n        gene_length[gene_name].append(len([x for x in seq.symbols_as_list() if x != '-']))\nfor gene, lens in gene_length.items():\n    print ('%6s: %d' % (gene, sum(lens) / len(lens)))\n# -\n\n\n"
  },
  {
    "path": "Chapter07/Reconstruction.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.6\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport os\nimport random\nimport shutil\nimport sys\n\nimport dendropy\nfrom dendropy.interop import raxml\n# -\n\nebola_data = dendropy.DnaCharacterMatrix.get_from_path('trim.fasta', 'fasta')\nrx = raxml.RaxmlRunner()\nebola_tree = rx.estimate_tree(ebola_data, ['-m', 'GTRGAMMA', '-N', '10'])\nprint('RAxML temporary directory: %s' % rx.working_dir_path)\ndel ebola_data\n\nebola_tree.write_to_path('my_ebola.nex', 'nexus')\n\n# +\nimport matplotlib.pyplot as plt\nfrom Bio import Phylo\n# # %matplotlib inline\nmy_ebola_tree = Phylo.read('my_ebola.nex', 'nexus')\nmy_ebola_tree.name = 'Our Ebolavirus tree'\n\nfig = plt.figure(figsize=(16, 18))\nax = fig.add_subplot(1, 1, 1)\nPhylo.draw(my_ebola_tree, axes=ax)\n# -\n\n# ## RAxML with Biopython\n\n# XXX change\nfrom Bio.Phylo.Applications import RaxmlCommandline\nraxml_cline = RaxmlCommandline(sequences='trim.fasta',\n                               model='GTRGAMMA', name='biopython',\n                               num_replicates='10',\n                               parsimony_seed=random.randint(0, sys.maxsize),\n                               working_dir=os.getcwd() + os.sep + 'bp_rx')\nprint(raxml_cline)\ntry:\n    os.mkdir('bp_rx')\nexcept OSError:\n    shutil.rmtree('bp_rx')\n    os.mkdir('bp_rx')\nout, err = raxml_cline()\n\nfrom Bio import Phylo\nbiopython_tree = Phylo.read('bp_rx/RAxML_bestTree.biopython', 'newick')\n\nprint(biopython_tree)\n\n\n"
  },
  {
    "path": "Chapter07/Selection.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# +\n### XXX This is probably to remove\n# -\n\nsl_2014 = []\ndrc_2007 = []\nfor seq in ebola_seqs.taxon_set:\n    if seq.label.startswith('EBOV_2014') and len(sl_2014) < 8:\n        sl_2014.append(ebola_seqs[seq])\n    elif seq.label.startswith('EBOV_2007'):\n        drc_2007.append(ebola_seqs[seq])\n\nprint(len(sl_2014))\nprint(len(drc_2007))\n\npair_stats = popgenstat.PopulationPairSummaryStatistics(sl_2014, drc_2007)\n\nprint('Average number of pairwise differences (total): %s' %\n      pair_stats.average_number_of_pairwise_differences)\nprint('Average number of pairwise differences between populations: %s' %\n      pair_stats.average_number_of_pairwise_differences_between)\nprint('Average number of pairwise differences within populations: %s' %\n      pair_stats.average_number_of_pairwise_differences_within)\nprint('Average number of new pairwise differences : %s' %\n      pair_stats.average_number_of_pairwise_differences_net)\nprint('Number of segregating sites: %s' %\n      pair_stats.num_segregating_sites)\nprint(\"Watterson's theta: %s\" %\n      pair_stats.wattersons_theta)\nprint(\"Wakeley's Psi: %s\" % pair_stats.wakeleys_psi)\nprint(\"Tajima's D: %s\" % pair_stats.tajimas_d)\n"
  },
  {
    "path": "Chapter07/Trees.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.6\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nimport dendropy\n\nebola_raxml = dendropy.Tree.get_from_path('my_ebola.nex', 'nexus')\n\n\n# +\ndef compute_level(node, level=0):\n    for child in node.child_nodes():\n        compute_level(child, level + 1)\n    if node.taxon is not None:\n        print(\"%s: %d %d\" % (node.taxon, node.level(), level))\n\ncompute_level(ebola_raxml.seed_node)\n\n\n# +\ndef compute_height(node):\n    children = node.child_nodes()\n    if len(children) == 0:\n        height = 0\n    else:\n        height = 1 + max(map(lambda x: compute_height(x), children))\n    desc = node.taxon or 'Internal'\n    print(\"%s: %d %d\" % (desc, height, node.level()))\n    return height\n\ncompute_height(ebola_raxml.seed_node)\n\n\n# +\ndef compute_nofs(node):\n    children = node.child_nodes()\n    nofs = len(children)\n    map(lambda x: compute_nofs(x), children)\n    desc = node.taxon or 'Internal'\n    print(\"%s: %d %d\" % (desc, nofs, node.level()))\n\ncompute_nofs(ebola_raxml.seed_node)\n\n\n# +\ndef print_nodes(node):\n    for child in node.child_nodes():\n        print_nodes(child)\n    if node.taxon is not None:\n        print('%s (%d)' % (node.taxon, node.level()))\n\nprint_nodes(ebola_raxml.seed_node)\n\n# +\nfrom collections import deque\n\ndef print_breadth(tree):\n    queue = deque()\n    queue.append(tree.seed_node)\n    while len(queue) > 0:\n        process_node = queue.popleft()\n        if process_node.taxon is not None:\n            print('%s (%d)' % (process_node.taxon, process_node.level()))\n        else:\n            for child in process_node.child_nodes():\n                queue.append(child)\n\nprint_breadth(ebola_raxml)\n\n# +\nfrom copy import deepcopy\nsimple_ebola = 
deepcopy(ebola_raxml)\n\ndef simplify_tree(node):\n    prefs = set()\n    for leaf in node.leaf_nodes():\n        my_toks = leaf.taxon.label.split(' ')[0].split('_')\n        if my_toks[0] == 'EBOV':\n            prefs.add('EBOV' + my_toks[1])\n        else:\n            prefs.add(my_toks[0])\n    if len(prefs) == 1:\n        print(prefs, len(node.leaf_nodes()))\n        node.taxon = dendropy.Taxon(label=list(prefs)[0])\n        #node.collapse_clade()\n        node.set_child_nodes([])\n    else:\n        for child in node.child_nodes():\n            simplify_tree(child)\n\nsimplify_tree(simple_ebola.seed_node)\nsimple_ebola.ladderize()\nsimple_ebola.write_to_path('ebola_simple.nex', 'nexus')\n# -\n\n\n"
  },
  {
    "path": "Chapter07/Visualization.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.6\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nfrom copy import deepcopy\nimport matplotlib.pyplot as plt\nfrom Bio import Phylo\n\nebola_tree = Phylo.read('my_ebola.nex', 'nexus')\nebola_tree.name = 'Ebolavirus tree'\nebola_simple_tree = Phylo.read('ebola_simple.nex', 'nexus')\nebola_simple_tree.name = 'Ebolavirus simplified tree'\n\nPhylo.draw_ascii(ebola_simple_tree)\nPhylo.draw_ascii(ebola_tree)\n\nfig = plt.figure(figsize=(16, 22))\nax = fig.add_subplot(111)\nPhylo.draw(ebola_simple_tree, axes=ax, branch_labels=\n           lambda c: c.branch_length if c.branch_length > 0.02 else None)\n\n# +\nfig = plt.figure(figsize=(16, 22))\nax = fig.add_subplot(111)\nfrom collections import OrderedDict\nmy_colors = OrderedDict({\n'EBOV_2014': 'red',\n'EBOV': 'magenta',\n'BDBV': 'cyan',\n'SUDV': 'blue',\n'RESTV' : 'green',\n'TAFV' : 'yellow'\n})\n\ndef get_color(name):\n    for pref, color in my_colors.items():\n        if name.find(pref) > -1:\n            return color\n    return 'grey'\n\ndef color_tree(node, fun_color=get_color):\n    if node.is_terminal():\n        node.color = fun_color(node.name)\n    else:\n        my_children = set()\n        for child in node.clades:\n            color_tree(child, fun_color)\n            my_children.add(child.color.to_hex())\n        if len(my_children) == 1:\n            node.color = child.color\n        else:\n            node.color = 'grey'\n\nebola_color_tree = deepcopy(ebola_tree)\ncolor_tree(ebola_color_tree.root)\nPhylo.draw(ebola_color_tree, axes=ax, label_func=\n           lambda x: x.name.split(' ')[0][1:] if x.name is not None else None)\n# -\n\n\n"
  },
  {
    "path": "Chapter08/.gitignore",
    "content": "*ent\n*fasta"
  },
  {
    "path": "Chapter08/Distance.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport math\nimport timeit\n\nfrom Bio import PDB\n# -\n\nrepository = PDB.PDBList()\nparser = PDB.PDBParser()\nrepository.retrieve_pdb_file('1TUP', file_format='pdb', pdir='.')  # XXX\np53_1tup = parser.get_structure('P 53', 'pdb1tup.ent')\n\nzns = []\nfor atom in p53_1tup.get_atoms():\n    if atom.element == 'ZN':\n        #print(atom, dir(atom), atom.mass, atom.element, atom.coord[0])\n        zns.append(atom)\nfor zn in zns:\n        print(zn, zn.coord)\n\n\n# +\n#Suggest a pymol viewing\n# -\n\n#Try this in numba?\ndef get_closest_atoms(pdb_struct, ref_atom, distance):\n    atoms = {}\n    rx, ry, rz = ref_atom.coord\n    for atom in pdb_struct.get_atoms():\n        if atom == ref_atom:\n            continue\n        x, y, z = atom.coord\n        my_dist = math.sqrt((x - rx)**2 + (y - ry)**2 + (z - rz)**2) \n        if my_dist < distance:\n            atoms[atom] = my_dist\n    return atoms\n\n\nfor zn in zns:\n    print()\n    print(zn.coord)\n    atoms = get_closest_atoms(p53_1tup, zn, 4)\n    for atom, distance in atoms.items():\n        print(atom.element, distance, atom.coord)\n\nfor distance in [1, 2, 4, 8, 16, 32, 64, 128]:\n    my_atoms = []\n    for zn in zns:\n        atoms = get_closest_atoms(p53_1tup, zn, distance)\n        my_atoms.append(len(atoms))\n    print(distance, my_atoms)\n\nnexecs = 10\nprint(timeit.timeit('get_closest_atoms(p53_1tup, zns[0], 4.0)',\n                    'from __main__ import get_closest_atoms, p53_1tup, zns',\n                    number=nexecs) / nexecs * 1000)\n\n\ndef get_closest_alternative(pdb_struct, ref_atom, distance):\n    atoms = {}\n    rx, ry, rz = ref_atom.coord\n    for atom in 
pdb_struct.get_atoms():\n        if atom == ref_atom:\n            continue\n        x, y, z = atom.coord\n        if abs(x - rx) > distance or abs(y - ry) > distance or abs(z - rz) > distance:\n            continue\n        my_dist = math.sqrt((x - rx)**2 + (y - ry)**2 + (z - rz)**2) \n        if my_dist < distance:\n            atoms[atom] = my_dist\n    return atoms\n\n\nprint(timeit.timeit('get_closest_alternative(p53_1tup, zns[0], 4.0)',\n                    'from __main__ import get_closest_alternative, p53_1tup, zns',\n                    number=nexecs) / nexecs * 1000)\n\nprint('Standard')\nfor distance in [1, 4, 16, 64, 128]:\n    print(timeit.timeit('get_closest_atoms(p53_1tup, zns[0], distance)',\n                        'from __main__ import get_closest_atoms, p53_1tup, zns, distance',\n                        number=nexecs) / nexecs * 1000)\nprint('Optimized')\nfor distance in [1, 4, 16, 64, 128]:\n    print(timeit.timeit('get_closest_alternative(p53_1tup, zns[0], distance)',\n                        'from __main__ import get_closest_alternative, p53_1tup, zns, distance',\n                        number=nexecs) / nexecs * 1000)\n\n\n\n# +\n#for interesting distances\n"
  },
  {
    "path": "Chapter08/Intro.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nfrom collections import defaultdict\n\nimport requests\n\nfrom Bio import ExPASy, SwissProt\n# -\n\n#explain why not biopython\nserver = 'https://rest.uniprot.org/uniprotkb/search'\ndef do_request(server, **kwargs):\n    params = ''\n    req = requests.get(server, params=kwargs)\n    if not req.ok:\n        req.raise_for_status()\n    return req\n\n\nreq = do_request(server,\n    # 1. Filtering human p53, reviewed entries\n    query='gene:p53 AND reviewed:true AND organism_id:9606',\n    format='tsv',\n    # 2. Specifying output columns with REST API field names\n    fields='accession,id,protein_name,gene_names,organism_name,length',\n    size=50\n)\nprint(req.text)\n\n#We might revisit this for KEGG\n\n# +\n#XXX - stringio\nimport pandas as pd\nimport io\n\nuniprot_list = pd.read_table(io.StringIO(req.text))\nuniprot_list.rename(columns={'Organism ID': 'ID'}, \ninplace=True)\nprint(uniprot_list)\n# -\n\np53_human = uniprot_list[\n    (uniprot_list.Entry == 'P04637') &\n    (uniprot_list['Entry Name'].str.contains('P53_HUMAN'))]['Entry'].iloc[0]\n\nhandle = ExPASy.get_sprot_raw(p53_human)\n\nsp_rec = SwissProt.read(handle)\n\nprint(sp_rec.entry_name, sp_rec.sequence_length, sp_rec.gene_name)\nprint(sp_rec.description)\nprint(sp_rec.organism, sp_rec.seqinfo)\nprint(sp_rec.sequence)\n\nprint(sp_rec.comments)\nprint(sp_rec.keywords)\n\nhelp(sp_rec)\n\ndone_features = set()\nprint('Total features:', len(sp_rec.features))\nfor feature in sp_rec.features:\n    if feature in done_features:\n        continue\n    else:\n        done_features.add(feature)\n        print(feature)\nprint('Cross references: ',len(sp_rec.cross_references))\nper_source = 
defaultdict(list)\nfor xref in sp_rec.cross_references:\n    source = xref[0]\n    per_source[source].append(xref[1:])\nprint(per_source.keys())\ndone_GOs = set()\nprint('Annotation SOURCES:', len(per_source['GO']))\nfor annot in per_source['GO']:\n    if annot[1][0] in done_GOs:\n        continue\n    else:\n        done_GOs.add(annot[1][0])\n        print(annot)\n\n\n"
  },
  {
    "path": "Chapter08/Mass.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nimport numpy as np\nimport pandas as pd\n\nfrom Bio import PDB\n\n# +\n# #!rm -f 1tup.cif 2>/dev/null\n# #!wget \"http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=cif&compression=NO&structureId=1TUP\" -O 1tup.cif\n#parser = PDB.MMCIFParser()\n#p53_1tup = parser.get_structure('P53', '1tup.cif')\n# -\n\nrepository = PDB.PDBList()\nparser = PDB.PDBParser()\nrepository.retrieve_pdb_file('1TUP', pdir='.', file_format='pdb')\np53_1tup = parser.get_structure('P 53', 'pdb1tup.ent')\n\nmy_residues = set()\nfor residue in p53_1tup.get_residues():\n    my_residues.add(residue.id[0])\nprint(my_residues)\n\n\n# +\ndef get_mass(atoms, accept_fun=lambda atom: atom.parent.id[0] != 'W'):\n    return sum([atom.mass for atom in atoms if accept_fun(atom)])\n\nchain_names = [chain.id for chain in p53_1tup.get_chains()]\nmy_mass = np.ndarray((len(chain_names), 3))\nfor i, chain in enumerate(p53_1tup.get_chains()):\n    my_mass[i, 0] = get_mass(chain.get_atoms())\n    my_mass[i, 1] = get_mass(chain.get_atoms(), accept_fun=lambda atom: atom.parent.id[0] not in [' ', 'W'])\n    my_mass[i, 2] = get_mass(chain.get_atoms(), accept_fun=lambda atom: atom.parent.id[0] == 'W')\nmasses = pd.DataFrame(my_mass, index=chain_names, columns=['No Water', 'Zincs', 'Water'])\nmasses\n\n\n# -\n\ndef get_center(atoms, weight_fun=lambda atom: 1 if atom.parent.id[0] != 'W' else 0):\n    xsum = ysum = zsum = 0.0\n    acum = 0.0\n    for atom in atoms:\n        x, y, z = atom.coord\n        weight = weight_fun(atom)\n        acum += weight\n        xsum += weight * x\n        ysum += weight * y\n        zsum += weight * z\n    return xsum / acum, ysum / acum, zsum / 
acum\n\n\nprint(get_center(p53_1tup.get_atoms()))\nprint(get_center(p53_1tup.get_atoms(),\n                 weight_fun=lambda atom: atom.mass if atom.parent.id[0] != 'W' else 0))\n\nmy_center = np.ndarray((len(chain_names), 6))\nfor i, chain in enumerate(p53_1tup.get_chains()):\n    x, y, z = get_center(chain.get_atoms())\n    my_center[i, 0] = x\n    my_center[i, 1] = y\n    my_center[i, 2] = z\n    x, y, z = get_center(chain.get_atoms(), weight_fun=lambda atom: atom.mass if atom.parent.id[0] != 'W' else 0)\n    my_center[i, 3] = x\n    my_center[i, 4] = y\n    my_center[i, 5] = z\nweights = pd.DataFrame(my_center, index=chain_names, columns=['X', 'Y', 'Z', 'X (Mass)', 'Y (Mass)', 'Z (Mass)'])\nweights\n\n# +\n#Pymol viz\n"
  },
  {
    "path": "Chapter08/PDB.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nfrom Bio import PDB\n\nrepository = PDB.PDBList()\nrepository.retrieve_pdb_file('1TUP', pdir='.', file_format='pdb')\nrepository.retrieve_pdb_file('1OLG', pdir='.', file_format='pdb')\nrepository.retrieve_pdb_file('1YCQ', pdir='.', file_format='pdb')\n\nparser = PDB.PDBParser()\np53_1tup = parser.get_structure('P 53 - DNA Binding', 'pdb1tup.ent')\np53_1olg = parser.get_structure('P 53 - Tetramerization', 'pdb1olg.ent')\np53_1ycq = parser.get_structure('P 53 - Transactivation', 'pdb1ycq.ent')\n\n\n# +\ndef print_pdb_headers(headers, indent=0):\n    ind_text = ' ' * indent\n    for header, content in headers.items():\n        if type(content) == dict:\n            print('\\n%s%20s:' % (ind_text, header))\n            print_pdb_headers(content, indent + 4)\n            print()\n        elif type(content) == list:\n            print('%s%20s:' % (ind_text, header))\n            for elem in content:\n                print('%s%21s %s' % (ind_text, '->', elem))\n        else:\n            print('%s%20s: %s' % (ind_text, header, content))\n\nprint_pdb_headers(p53_1tup.header)\n# -\n\nprint(p53_1tup.header['compound'])\nprint(p53_1olg.header['compound'])\nprint(p53_1ycq.header['compound'])\n\n\ndef describe_model(name, pdb):\n    print()\n    for model in pdb:\n        for chain in model:\n            print('%s - Chain: %s. Number of residues: %d. Number of atoms: %d.' 
%\n                  (name, chain.id, len(chain), len(list(chain.get_atoms()))))\ndescribe_model('1TUP', p53_1tup)\ndescribe_model('1OLG', p53_1olg)\ndescribe_model('1YCQ', p53_1ycq)\n#will go deep in a next recipe (bottom up)\n\nfor residue in p53_1tup.get_residues():\n    if residue.id[0] in [' ', 'W']:\n        continue\n    print(residue.id)\n\nres = next(p53_1tup[0]['A'].get_residues())\nprint(res)\nfor atom in res:\n    print(atom, atom.serial_number, atom.element)\nprint(p53_1tup[0]['A'][94]['CA'])\n\n# +\nfrom Bio.SeqIO import PdbIO, FastaIO\nfrom Bio import SeqIO\n\ndef get_fasta(pdb_file, fasta_file, transfer_ids=None):\n    records = list(PdbIO.PdbSeqresIterator(pdb_file))\n    if transfer_ids is not None:\n        records = [rec for rec in records if rec.id in transfer_ids and len(rec.seq) > 0]\n    else:\n        records = [rec for rec in records if len(rec.seq) > 0]\n    \n    with open(fasta_file, 'w') as out_handle:\n        SeqIO.write(records, out_handle, 'fasta')\n    for rec in records:\n       print(rec.id, rec.seq, len(rec.seq))\n        \n        \nget_fasta('pdb1tup.ent', '1tup.fasta', transfer_ids=['1TUP:B'])\nget_fasta('pdb1olg.ent', '1olg.fasta', transfer_ids=['1OLG:B'])\nget_fasta('pdb1ycq.ent', '1ycq.fasta', transfer_ids=['1YCQ:B'])\n# -\n\n\n"
  },
  {
    "path": "Chapter08/Parser.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.3\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\nfrom Bio import PDB\n\n#XXX\nrepository = PDB.PDBList()\nrepository.retrieve_pdb_file('1TUP', pdir='.', file_format='pdb')\n\n# +\nrec_types = {\n    #single line\n    'HEADER': [(str, 11, 49), (str, 50, 58), (str, 62, 65)],\n    #multi_line\n    'SOURCE': [(int, 7, 9), (str, 10, 78)],\n    #multi_rec\n    'LINK' : [(str, 12, 15), (str, 16, 16), (str, 17, 19), (str, 21, 21), (int, 22, 25),\n              (str, 26, 26), (str, 42, 45), (str, 46, 46), (str, 47, 49), (str, 51, 51),\n              (int, 52, 55), (str, 56, 56), (str, 59, 64), (str, 66, 71), (float, 73, 77)],\n    'HELIX': [(int, 7, 9), (str, 11, 13), (str, 15, 17), (str, 19, 19), (int, 21, 24),\n              (str, 25, 25), (str, 27, 29), (str, 31, 31),\n              (int, 33, 36), (str, 37 ,37), (int, 38, 39), (str, 40, 69), (int, 71, 75)],\n    'SHEET': [(int, 7, 9), (str, 11, 13), (int, 14, 15), (str, 17, 19), (str, 21, 21),\n              (int, 22, 24), (str, 26, 26), (str, 28, 30),\n              (str, 32, 32), (int, 33, 36), (str, 37, 37), (int, 38, 39), (str, 41, 44),\n              (str, 45, 47), (str, 49, 49), (int, 50, 53), (str, 54, 54), (str, 56, 59),\n              (str, 60, 62), (str, 64, 64), (int, 65, 68), (str, 69, 69)],\n}\n\ndef parse_pdb(hdl):\n    for line in hdl:\n        line = line[:-1]  # remove \\n but not other whitespace\n        toks = []\n        for section, elements in rec_types.items():\n            if line.startswith(section):\n                for fun, start, end in elements:\n                    try:\n                        toks.append(fun(line[start: end + 1]))\n                    except ValueError:\n                        toks.append(None)  # eg continuation\n               
 yield (section, toks)\n        if len(toks) == 0:\n            yield ('UNKNOWN', line)\n                \n\n\n# -\n\nhdl = open('pdb1tup.ent')\ndone_rec = set()\nfor rec in parse_pdb(hdl):\n    if rec[0] == 'UNKNOWN' or rec[0] in done_rec:\n        continue\n    print(rec)\n    done_rec.add(rec[0])\n\n# +\nmulti_lines = ['SOURCE']\n\n#assume multi is just a string\ndef process_multi_lines(hdl):\n    current_multi = ''\n    current_multi_name = None\n    for rec_type, toks in parse_pdb(hdl):\n        if current_multi_name is not None and current_multi_name != rec_type:\n            yield current_multi_name, [current_multi]\n            current_multi = ''\n            current_multi_name = None\n        if rec_type in multi_lines:\n            current_multi += toks[1].strip().rstrip() + ' '\n            current_multi_name = rec_type\n        else:\n            if len(current_multi) != 0:\n                yield current_multi_name, [current_multi]\n                current_multi = ''\n                current_multi_name = None                \n            yield rec_type, toks\n    if len(current_multi) != 0:\n        yield current_multi_name, [current_multi]\n\n\n# -\n\nhdl = open('pdb1tup.ent')\ndone_rec = set()\nfor rec in process_multi_lines(hdl):\n    if rec[0] == 'UNKNOWN' or rec[0] in done_rec:\n        continue\n    print(rec)\n    done_rec.add(rec[0])\n\n\n# +\ndef get_spec_list(my_str):\n    #ignoring escape characters\n    spec_list = {}\n    elems = my_str.strip().strip().split(';')\n    for elem in elems:\n        toks = elem.split(':')\n        spec_list[toks[0].strip()] = toks[1].strip()\n    return spec_list\n\nstruct_types = {\n    'SOURCE': [get_spec_list] \n}\n\ndef process_struct_types(hdl):\n    for rec_type, toks in process_multi_lines(hdl):\n        if rec_type in struct_types.keys():\n            funs = struct_types[rec_type]\n            struct_toks = []\n            for tok, fun in zip(toks, funs):\n                struct_toks.append(fun(tok))\n  
          yield rec_type, struct_toks\n        else:\n            yield rec_type, toks\n\n\n# -\n\nhdl = open('pdb1tup.ent')\nfor rec in process_struct_types(hdl):\n    if rec[0] != 'SOURCE':\n        continue\n    print(rec)\n\n\n"
  },
  {
    "path": "Chapter08/PyMol_Intro.py",
    "content": "import threading\ndef dump_thread():\n    print\n    for thr in threading.enumerate():\n        print(thr)\ndump_thread()\nimport pymol\npymol.pymol_launch=4\npymol.pymol_argv = [ 'pymol', '-qc'] #  Quiet / no GUI\nfrom pymol import cmd\npymol.finish_launching()\ndump_thread()\n\n#cmd.fetch('1TUP', async=False)\ncmd.fetch('1TUP')\ncmd.disable('all')\ncmd.enable('1TUP')\ncmd.bg_color('white')\ncmd.hide('all')\ncmd.show('cartoon')\n#cmd.hide('cartoon', 'chain E+F')\n#cmd.show('ribbon', 'chain E+F')\ncmd.select('zinc', 'name zn')\ncmd.show('sphere', 'zinc')\ncmd.set('ray_trace_mode', 3)\ncmd.png('1TUP.png', width=1980, height=1080, quiet=0, ray=1, prior=False)\ndump_thread()\n\ncmd.set('ray_trace_mode', 1)\ncmd.png('TUP.png', width=1980, height=1080, quiet=0, ray=1, prior=False)\ncmd.quit()\n"
  },
  {
    "path": "Chapter08/PyMol_Movie.py",
    "content": "import pymol\nfrom pymol import cmd\n#pymol.pymol_argv = [ 'pymol', '-qc'] #  Quiet / no GUI\npymol.finish_launching()\n\n#cmd.fetch('1TUP', async=False)\ncmd.fetch('1TUP')\n\ncmd.disable('all')\ncmd.enable('1TUP')\ncmd.hide('all')\ncmd.show('sphere', 'name zn')\n\ncmd.show('surface', 'chain A+B+C')\ncmd.show('cartoon', 'chain E+F')\ncmd.scene('S0', action='store', view=0, frame=0, animate=-1)\n\ncmd.show('cartoon')\ncmd.hide('surface')\n\ncmd.scene('S1', action='store', view=0, frame=0, animate=-1)\n\ncmd.hide('cartoon', 'chain A+B+C')\ncmd.show('mesh', 'chain A')\ncmd.show('sticks', 'chain A+B+C')\ncmd.scene('S2', action='store', view=0, frame=0, animate=-1)\n\ncmd.set('ray_trace_mode', 0)\ncmd.mset(1, 500)\n\n\ncmd.frame(0)\ncmd.scene('S0')\ncmd.mview()\ncmd.frame(60)\ncmd.set_view((-0.175534308,   -0.331560850,   -0.926960170,\n             0.541812420,     0.753615797,   -0.372158051,\n             0.821965039,    -0.567564785,    0.047358301,\n             0.000000000,     0.000000000, -249.619018555,\n             58.625568390,   15.602619171,   77.781631470,\n             196.801528931, 302.436492920,  -20.000000000))\n\ncmd.mview()\ncmd.frame(90)\ncmd.set_view((-0.175534308,   -0.331560850,   -0.926960170,\n              0.541812420,    0.753615797,   -0.372158051,\n              0.821965039,   -0.567564785,    0.047358301,\n              -0.000067875,    0.000017881, -249.615447998,\n              54.029174805,   26.956727982,   77.124832153,\n             196.801528931,  302.436492920,  -20.000000000))\ncmd.mview()\ncmd.frame(150)\ncmd.set_view((-0.175534308,   -0.331560850,   -0.926960170,\n              0.541812420,    0.753615797,   -0.372158051,\n              0.821965039,   -0.567564785,    0.047358301,\n              -0.000067875,    0.000017881,  -55.406421661,\n              54.029174805,   26.956727982,   77.124832153,\n              2.592475891,  108.227416992,  
-20.000000000))\ncmd.mview()\ncmd.frame(200)\ncmd.scene('S1')\ncmd.mview()\ncmd.frame(350)\ncmd.scene('S1')\ncmd.set_view((0.395763457,   -0.173441306,    0.901825786,\n              0.915456235,    0.152441502,   -0.372427106,\n             -0.072881661,    0.972972929,    0.219108686,\n              0.000070953,    0.000013039,  -37.689743042,\n             57.748500824,   14.325904846,   77.241867065,\n             -15.123448372,   90.511535645,  -20.000000000))\n\ncmd.mview()\ncmd.frame(351)\ncmd.scene('S2')\ncmd.mview()\n\ncmd.frame(500)\ncmd.scene('S2')\ncmd.mview()\ncmd.mplay()\ncmd.mpng('p53_1tup')\n\ncmd.quit()\n"
  },
  {
    "path": "Chapter08/Stats.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.8\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\nfrom collections import defaultdict\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n# #%matplotlib inline\n\nfrom Bio import PDB\n# -\n\nrepository = PDB.PDBList()\nparser = PDB.PDBParser()\nrepository.retrieve_pdb_file('1TUP', pdir='.', file_format='pdb') #XXX\np53_1tup = parser.get_structure('P 53', 'pdb1tup.ent')\n\n# +\natom_cnt = defaultdict(int)\natom_chain = defaultdict(int)\natom_res_types = defaultdict(int)\n\nfor atom in p53_1tup.get_atoms():\n    my_residue = atom.parent\n    my_chain = my_residue.parent\n    atom_chain[my_chain.id] += 1\n    if my_residue.resname != 'HOH':\n        atom_cnt[atom.element] += 1\n    atom_res_types[my_residue.resname] += 1\nprint(dict(atom_res_types))\nprint(dict(atom_chain))\nprint(dict(atom_cnt))\n# -\n\nres_types = defaultdict(int)\nres_per_chain = defaultdict(int)\nfor residue in p53_1tup.get_residues():\n    res_types[residue.resname] += 1\n    res_per_chain[residue.parent.id] +=1\nprint(dict(res_types))\nprint(dict(res_per_chain))\n\n\ndef get_bounds(my_atoms):\n    my_min = [sys.maxsize] * 3\n    my_max = [-sys.maxsize] * 3\n    for atom in my_atoms:\n        for i, coord in enumerate(atom.coord):\n            if coord < my_min[i]:\n                my_min[i] = coord\n            if coord > my_max[i]:\n                my_max[i] = coord\n    return my_min, my_max\n\n\nchain_bounds = {}\nfor chain in p53_1tup.get_chains():\n    print(chain.id, get_bounds(chain.get_atoms()))\n    chain_bounds[chain.id] = get_bounds(chain.get_atoms())\nprint(get_bounds(p53_1tup.get_atoms()))\n\n#matplotlib 3d plot\nfig = plt.figure(figsize=(16, 9))\nax3d = fig.add_subplot(111, 
projection='3d')\nax_xy = fig.add_subplot(331)\nax_xy.set_title('X/Y')\nax_xz = fig.add_subplot(334)\nax_xz.set_title('X/Z')\nax_zy = fig.add_subplot(337)\nax_zy.set_title('Z/Y')\ncolor = {'A': 'r', 'B': 'g', 'C': 'b', 'E': '0.5', 'F': '0.75'}\nzx, zy, zz = [], [], []\nfor chain in p53_1tup.get_chains():\n    xs, ys, zs = [], [], []\n    for residue in chain.get_residues():\n        ref_atom = next(residue.get_iterator())\n        x, y, z = ref_atom.coord\n        if ref_atom.element == 'ZN':\n            zx.append(x)\n            zy.append(y)\n            zz.append(z)\n            continue\n        xs.append(x)\n        ys.append(y)\n        zs.append(z)\n    ax3d.scatter(xs, ys, zs, color=color[chain.id])\n    ax_xy.scatter(xs, ys, marker='.', color=color[chain.id])\n    ax_xz.scatter(xs, zs, marker='.', color=color[chain.id])\n    ax_zy.scatter(zs, ys, marker='.', color=color[chain.id])\nax3d.set_xlabel('X')\nax3d.set_ylabel('Y')\nax3d.set_zlabel('Z')\nax3d.scatter(zx, zy, zz, color='k', marker='v', s=300)\nax_xy.scatter(zx, zy, color='k', marker='v', s=80)\nax_xz.scatter(zx, zz, color='k', marker='v', s=80)\nax_zy.scatter(zz, zy, color='k', marker='v', s=80)\nfor ax in [ax_xy, ax_xz, ax_zy]:\n    ax.get_yaxis().set_visible(False)\n    ax.get_xaxis().set_visible(False)\n\n\n"
  },
  {
    "path": "Chapter08/mmCIF.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.3\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\nfrom Bio import PDB\n\n# !rm -f 1tup.cif 2>/dev/null\n# !wget \"http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=cif&compression=NO&structureId=1TUP\" -O 1tup.cif\n\nparser = PDB.MMCIFParser()\np53_1tup = parser.get_structure('P53_HUMAN', '1tup.cif')\n\n\ndef describe_model(name, pdb):\n    print()\n    for model in p53_1tup:\n        for chain in model:\n            print('%s - Chain: %s. Number of residues: %d. Number of atoms: %d.' %\n                  (name, chain.id, len(chain), len(list(chain.get_atoms()))))\ndescribe_model('1TUP', p53_1tup)\n\ndone_chain = set()\nfor residue in p53_1tup.get_residues():\n    chain = residue.parent\n    if chain.id in done_chain:\n        continue\n    done_chain.add(chain.id)\n    print(chain.id, residue.id)\n\nmmcif_dict = PDB.MMCIF2Dict.MMCIF2Dict('1tup.cif')\n\nfor k, v in mmcif_dict.items():\n    print(k, v)\n    print()\n\n\n"
  },
  {
    "path": "Chapter09/galaxy/.gitignore",
    "content": "galaxy.yaml.enc\ntool\nsalt"
  },
  {
    "path": "Chapter09/galaxy/LCT.bed",
    "content": "track name=gene description=\"Gene information\"\n2\t135836529\t135837180\tENSE00002202258\t0\t-\n2\t135833110\t135833190\tENSE00001660765\t0\t-\n2\t135829592\t135829676\tENSE00001731451\t0\t-\n2\t135823900\t135824003\tENSE00001659892\t0\t-\n2\t135822019\t135822098\tENSE00001777620\t0\t-\n2\t135817340\t135818061\tENSE00001602826\t0\t-\n2\t135812310\t135812956\tENSE00000776576\t0\t-\n2\t135808442\t135809993\tENSE00001008768\t0\t-\n2\t135807127\t135807396\tENSE00000776573\t0\t-\n2\t135804766\t135805057\tENSE00000776572\t0\t-\n2\t135803929\t135804128\tENSE00000776571\t0\t-\n2\t135800606\t135800809\tENSE00000776570\t0\t-\n2\t135798028\t135798138\tENSE00003515081\t0\t-\n2\t135794640\t135794775\tENSE00001630333\t0\t-\n2\t135790657\t135790881\tENSE00001667885\t0\t-\n2\t135789570\t135789798\tENSE00001728878\t0\t-\n2\t135787839\t135788544\tENSE00001653704\t0\t-\n2\t135812310\t135812959\tENSE00001745158\t0\t-\n2\t135808442\t135809993\tENSE00001008768\t0\t-\n2\t135807127\t135807396\tENSE00000776573\t0\t-\n2\t135804766\t135805057\tENSE00000776572\t0\t-\n2\t135803929\t135804128\tENSE00000776571\t0\t-\n2\t135798028\t135798138\tENSE00003459353\t0\t-\n2\t135794336\t135794775\tENSE00001635523\t0\t-\n2\t135810168\t135810279\tENSE00001438557\t0\t-\n2\t135820190\t135820639\tENSE00001732580\t0\t+\n2\t135821674\t135823087\tENSE00001695040\t0\t+\n2\t135836529\t135837180\tNM_002299.2.1\t0\t-\n2\t135833110\t135833190\tNM_002299.2.2\t0\t-\n2\t135829592\t135829676\tNM_002299.2.3\t0\t-\n2\t135823900\t135824003\tNM_002299.2.4\t0\t-\n2\t135822019\t135822098\tNM_002299.2.5\t0\t-\n2\t135817340\t135818061\tNM_002299.2.6\t0\t-\n2\t135812310\t135812956\tNM_002299.2.7\t0\t-\n2\t135808442\t135809993\tNM_002299.2.8\t0\t-\n2\t135807127\t135807396\tNM_002299.2.9\t0\t-\n2\t135804766\t135805057\tNM_002299.2.10\t0\t-\n2\t135803929\t135804128\tNM_002299.2.11\t0\t-\n2\t135800606\t135800809\tNM_002299.2.12\t0\t-\n2\t135798028\t135798138\tNM_002299.2.13\t0\t-\n2\t135794640\t135794775\tNM_002299.2
.14\t0\t-\n2\t135790657\t135790881\tNM_002299.2.15\t0\t-\n2\t135789570\t135789798\tNM_002299.2.16\t0\t-\n2\t135787844\t135788544\tNM_002299.2.17\t0\t-\n2\t135836529\t135837169\tCCDS2178.117\t0\t-\n2\t135833110\t135833190\tCCDS2178.116\t0\t-\n2\t135829592\t135829676\tCCDS2178.115\t0\t-\n2\t135823900\t135824003\tCCDS2178.114\t0\t-\n2\t135822019\t135822098\tCCDS2178.113\t0\t-\n2\t135817340\t135818061\tCCDS2178.112\t0\t-\n2\t135812310\t135812956\tCCDS2178.111\t0\t-\n2\t135808442\t135809993\tCCDS2178.110\t0\t-\n2\t135807127\t135807396\tCCDS2178.19\t0\t-\n2\t135804766\t135805057\tCCDS2178.18\t0\t-\n2\t135803929\t135804128\tCCDS2178.17\t0\t-\n2\t135800606\t135800809\tCCDS2178.16\t0\t-\n2\t135798028\t135798138\tCCDS2178.15\t0\t-\n2\t135794640\t135794775\tCCDS2178.14\t0\t-\n2\t135790657\t135790881\tCCDS2178.13\t0\t-\n2\t135789570\t135789798\tCCDS2178.12\t0\t-\n2\t135788323\t135788544\tCCDS2178.11\t0\t-\n"
  },
  {
    "path": "Chapter09/galaxy/api.py",
    "content": "import base64\nfrom collections import defaultdict\n#import ftplib\n\nimport getpass\nimport pprint\nimport warnings\n\nfrom ruamel.yaml import YAML\n\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\nimport pandas as pd\n\nfrom bioblend.galaxy import GalaxyInstance\n\nimport paramiko\n\npp = pprint.PrettyPrinter()\nwarnings.filterwarnings('ignore')\n# explain above, and warn\n\n\nwith open('galaxy.yaml.enc', 'rb') as f:\n    enc_conf = f.read()\n\n\npassword = getpass.getpass('Please enter the password:').encode()\nwith open('salt', 'rb') as f:\n    salt = f.read()\nkdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,\n                 iterations=100000, backend=default_backend())\nkey = base64.urlsafe_b64encode(kdf.derive(password))\nfernet = Fernet(key)\n\nyaml = YAML()\nconf = yaml.load(fernet.decrypt(enc_conf).decode())\n\nserver = conf['server']\nrest_protocol = conf['rest_protocol']\nrest_port = conf['rest_port']\nuser = conf['user']\npassword = conf['password']\nsftp_port = int(conf['sftp_port'])\napi_key = conf['api_key']\n\nrest_url = '%s://%s:%d' % (rest_protocol, server, rest_port)\n\nhistory_name = 'bioinf_example'\n\ngi = GalaxyInstance(url=rest_url, key=api_key)\ngi.verify = False\nhistories = gi.histories\n\nprint('Existing histories:')\nfor history in histories.get_histories():\n    if history['name'] == history_name:\n        histories.delete_history(history['id'])\n    print('  - ' + history['name'])\nprint()\n\nds_history = histories.create_history(history_name)\n\n\nprint('Uploading file')\ntransport = paramiko.Transport((server, sftp_port))\ntransport.connect(None, user, password)\nsftp = paramiko.SFTPClient.from_transport(transport)\nsftp.put('LCT.bed', 'LCT.bed')\nsftp.close()\ntransport.close()\n#ftp = ftplib.FTP() \n#ftp.connect(host=server, 
port=ftp_port)\n#ftp.login(user=user, passwd=password)\n#f = open('LCT.bed', 'rb')\n#ftp.set_pasv(True)  # explain\n##ftp.storbinary('STOR LCT.bed', f)\n#s = ftp.transfercmd('STOR LCT.bed')\n#s.send(f.read())\n#s.close()\n#f.close() \n#ftp.close()\n\ngi.tools.upload_from_ftp('LCT.bed', ds_history['id'])\nprint()\n\ncontents = gi.histories.show_history(ds_history['id'], contents=True)\n\ndef summarize_contents(contents):\n    summary = defaultdict(list)\n    for item in contents:\n        summary['id'].append(item['id'])\n        summary['hid'].append(item['hid'])\n        summary['name'].append(item['name'])\n        summary['type'].append(item['type'])\n        summary['extension'].append(item['extension'])\n    return pd.DataFrame.from_dict(summary)\n\nprint('History contents:')\npd_contents = summarize_contents(contents)\nprint(pd_contents)\nprint()\n\nprint('Metadata for LCT.bed')\nbed_ds = contents[0]\npp.pprint(bed_ds)\nprint()\n\nprint('Metadata about all tools')\nall_tools = gi.tools.get_tools()\npp.pprint(all_tools)\nprint()\n\nbed2gff = gi.tools.get_tools(name='Convert BED to GFF')[0]\nprint(\"Convert BED to GFF metadata:\")\npp.pprint(gi.tools.show_tool(bed2gff['id'], io_details=True, link_details=True))\nprint()\n\ndef dataset_to_param(dataset):\n    return dict(src='hda', id=dataset['id'])\n\ntool_inputs = {\n    'input1': dataset_to_param(bed_ds)\n    }\n\n#hid!\n\n\ngi.tools.run_tool(ds_history['id'], bed2gff['id'], tool_inputs=tool_inputs)\n"
  },
  {
    "path": "Chapter09/galaxy/encrypt.py",
    "content": "\"Encrypt an YAML file with the script configuration\"\n\nimport base64\nimport getpass\nfrom io import StringIO\nimport os\n\nfrom ruamel.yaml import YAML\n\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\npassword = getpass.getpass('Please enter the password:').encode()\n\nsalt = os.urandom(16)\nkdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,\n                 iterations=100000, backend=default_backend())\nkey = base64.urlsafe_b64encode(kdf.derive(password))\nfernet = Fernet(key)\n\nwith open('salt', 'wb') as w:\n    w.write(salt)\n\n\nyaml = YAML()\n\ncontent = yaml.load(open('galaxy.yaml', 'rt', encoding='utf-8'))\nprint(type(content), content)\noutput = StringIO()\nyaml.dump(content, output)\nprint ('Encrypting:\\n%s' % output.getvalue())\n\nenc_output = fernet.encrypt(output.getvalue().encode())\n\nwith open('galaxy.yaml.enc', 'wb') as w:\n    w.write(enc_output)\n\n\nprint(\"Complete, the clear version should be deleted now\")\n"
  },
  {
    "path": "Chapter09/galaxy/galaxy.yaml",
    "content": "rest_protocol: http\nserver: localhost\nrest_port: 8080\nsftp_port: 8022\nuser: admin@galaxy.org\npassword: password\napi_key: fakekey\n"
  },
  {
    "path": "Chapter09/nextflow/.gitignore",
    "content": "data\npca.png\nwork\n.nextflow*\nreport"
  },
  {
    "path": "Chapter09/nextflow/pipeline.nf",
    "content": "nextflow.enable.dsl=2\n\ndownload_root = \"https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3\"\n\n\nprocess plink_download {\n  output:\n  path 'hapmap.map.gz'//, emit: mapgz\n  path 'hapmap.ped.gz'//, emit: pedgz\n \n  script:\n  \"\"\"\n  wget $download_root/plink_format/hapmap3_r3_b36_fwd.consensus.qc.poly.map.gz -O hapmap.map.gz\n  wget $download_root/plink_format/hapmap3_r3_b36_fwd.consensus.qc.poly.ped.gz -O hapmap.ped.gz\n   \"\"\"\n}\n\n\nprocess uncompress_plink {\n  publishDir 'data', glob: '*', mode: 'copy'\n  \n  input:\n  path mapgz\n  path pedgz\n\n  output:\n  path 'hapmap.map'\n  path 'hapmap.ped'\n\n  script:\n  \"\"\"\n  gzip -dc $mapgz > hapmap.map\n  gzip -dc $pedgz > hapmap.ped\n  \"\"\"\n}\n\n//DSL 2 and docs\n//conda\n\nprocess subsample_1p {\n  input:\n  path 'hapmap.map'\n  path 'hapmap.ped'\n\n  output:\n  path 'hapmap1.map'\n  path 'hapmap1.ped'\n\n  script:\n  \"\"\"\n  plink2 --pedmap hapmap --out hapmap1 --thin 0.01 --geno 0.1 --export ped\n  \"\"\"\n}\n\nprocess plink_pca {\n  input:\n  path 'hapmap.map'\n  path 'hapmap.ped'\n\n  output:\n  path 'hapmap.eigenvec'\n  path 'hapmap.eigenval'\n\n  script:\n  \"\"\"\n  plink2 --pca --pedmap hapmap -out hapmap\n  \"\"\"\n}\n\n\nprocess plot_pca {\n  publishDir '.', glob: '*', mode: 'copy'\n\n  input:\n  path 'hapmap.eigenvec'\n  path 'hapmap.eigenval'\n\n  output:\n  path 'pca.png'\n\n  script:\n  \"\"\"\n  #!/usr/bin/env python\n  import pandas as pd\n\n  pca_df = pd.read_csv('hapmap.eigenvec', sep='\\t') \n  ax = pca_df.plot.scatter(x=2, y=3, figsize=(16, 9))\n  ax.figure.savefig('pca.png')\n  \"\"\"\n}\n\n\n/*\nworkflow {\n    plink_download | uncompress_plink\n}\n*/\n\n\n/*\nworkflow {\n    ped_file = file('data/hapmap.ped')\n    map_file = file('data/hapmap.map')\n    if (!ped_file.exists() | !map_file.exists()) {\n        plink_download | uncompress_plink\n    }\n}\n*/\n\n\nworkflow {\n    ped_file = file('data/hapmap.ped')\n    map_file = 
file('data/hapmap.map')\n    if (!ped_file.exists() | !map_file.exists()) {\n        plink_download | uncompress_plink | subsample_1p | plink_pca | plot_pca\n    }\n    else {\n        subsample_1p(\n            Channel.fromPath('data/hapmap.map'),\n            Channel.fromPath('data/hapmap.ped')) | plink_pca | plot_pca\n    }\n}\n"
  },
  {
    "path": "Chapter09/snakemake/.gitignore",
    "content": "data\nscratch\n.snakemake\npca.png\ndag.svg\nbio.png\nbio.svg"
  },
  {
    "path": "Chapter09/snakemake/Snakefile",
    "content": "rule all:\n    input:\n        \"pca.png\"\n\nrule plink_download:\n    output:\n        map=\"scratch/hapmap.map.gz\",\n        ped=\"scratch/hapmap.ped.gz\",\n        rel=\"data/relationships.txt\"\n    shell:\n        \"\"\"\n        python -c \"import urllib.request; urllib.request.urlretrieve(\n            'https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3/plink_format/hapmap3_r3_b36_fwd.consensus.qc.poly.map.gz', \n            '{output.map}')\"\n        python -c \"import urllib.request; urllib.request.urlretrieve(\n            'https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3/plink_format/hapmap3_r3_b36_fwd.consensus.qc.poly.ped.gz', \n            '{output.ped}')\"\n        python -c \"import urllib.request; urllib.request.urlretrieve(\n            'https://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3_r3/relationships_w_pops_041510.txt', \n            '{output.rel}')\"\n        \"\"\"\n\nPLINKEXTS = ['ped', 'map']\n\nrule uncompress_plink:\n    input:\n        \"scratch/hapmap.{plinkext}.gz\"\n\n    output:\n        \"data/hapmap.{plinkext}\"\n\n    shell:\n        \"gzip -dc {input} > {output}\"\n\n\n\nrule subsample_1p:\n    input:\n        \"data/hapmap.ped\",\n        \"data/hapmap.map\"\n\n    output:\n        \"data/hapmap1.ped\",\n        \"data/hapmap1.map\"\n\n    run:\n        shell(f\"plink2 --pedmap {input[0][:-4]} --out {output[0][:-4]} --thin 0.01 --geno 0.1 --export ped\")\n\n# snakemake and software requirements\n\n# https://snakemake.readthedocs.io/en/stable/tutorial/additional_features.html#automatic-deployment-of-software-dependencies\n#plink2 --pedmap data/hapmap --out data/hapmap10 --thin 0.1 --geno 0.1 --export ped\n\nrule plink_pca:\n    input:\n        \"data/hapmap1.ped\",\n        \"data/hapmap1.map\"\n\n    output:\n        \"data/hapmap1.eigenvec\",\n        \"data/hapmap1.eigenval\"\n\n    shell:\n        \"plink2 --pca --pedmap data/hapmap1 -out data/hapmap1\"\n\n\nrule plot_pca:\n    input:\n     
   \"data/hapmap1.eigenvec\",\n        \"data/hapmap1.eigenval\"\n\n    output:\n        \"pca.png\"\n\n    script:\n        \"./plot_pca.py\"\n\n"
  },
  {
    "path": "Chapter09/snakemake/plot_pca.py",
    "content": "import pandas as pd\n\neigen_fname = snakemake.input[0] if snakemake.input[0].endswith('eigenvec') else snakemake.input[1]\npca_df = pd.read_csv(eigen_fname, sep='\\t') \nax = pca_df.plot.scatter(x=2, y=3, figsize=(16, 9))\nax.figure.savefig(snakemake.output[0]) \n"
  },
  {
    "path": "Chapter10/Clustering.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# + jupyter={\"outputs_hidden\": false}\nimport os\n\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\n\nimport numpy as np\n\nfrom genomics.popgen.pca import plot\n# -\n\n# ## Meta-data load\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('../Chapter06/relationships_w_pops_041510.txt')\nind_pop = {}\nf.readline()  # header\nfor l in f:\n    toks = l.rstrip().split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    pop = toks[-1]\n    ind_pop['/'.join([fam_id, ind_id])] = pop\nf.close()\n# -\n\n# ## With scikit-learn\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('../Chapter06/hapmap10_auto_noofs_ld_12.ped')\nninds = 0\nind_order = []\nfor line in f:\n    ninds += 1\n    toks = line[:100].replace(' ', '\\t').split('\\t') #  for speed\n    fam_id = toks[0]\n    ind_id = toks[1]\n    ind_order.append('%s/%s' % (fam_id, ind_id))\nnsnps = (len(line.replace(' ', '\\t').split('\\t')) - 6) // 2\nprint (nsnps)\nf.close()\n\n# + jupyter={\"outputs_hidden\": false}\nall_array = np.empty((ninds, nsnps), dtype=int)\nf = open('../Chapter06/hapmap10_auto_noofs_ld_12.ped')\nfor ind, line in enumerate(f):\n    snps = line.replace(' ', '\\t').split('\\t')[6:]\n    for pos in range(len(snps) // 2):\n        a1 = int(snps[2 * pos])\n        a2 = int(snps[2 * pos + 1])\n        my_code = a1 + a2 - 2\n        all_array[ind, pos] = my_code\nf.close()\n#slow\n# -\n\npredict_case = all_array[-1, :]\npca_array = all_array[:-1,:]\n\nlast_ind = ind_order[-1]\nlast_ind, ind_pop[last_ind]\n\nmy_pca = PCA(n_components=2)\nmy_pca.fit(pca_array)\ntrans = my_pca.transform(pca_array)\n\nsc_ind_comp = {}\nfor i, ind_pca in 
enumerate(trans):\n    sc_ind_comp[ind_order[i]] = ind_pca\nplot.render_pca(sc_ind_comp, cluster=ind_pop)\n\n\n# + jupyter={\"outputs_hidden\": false}\ndef plot_kmeans_pca(trans, kmeans):\n    x_min, x_max = trans[:, 0].min() - 1, trans[:, 0].max() + 1\n    y_min, y_max = trans[:, 1].min() - 1, trans[:, 1].max() + 1\n    mesh_x, mesh_y = np.meshgrid(np.arange(x_min, x_max, 0.5), np.arange(y_min, y_max, 0.5))\n\n    k_surface = kmeans.predict(np.c_[mesh_x.ravel(), mesh_y.ravel()]).reshape(mesh_x.shape)\n    fig, ax = plt.subplots(1,1, dpi=300)\n    ax.imshow(\n        k_surface, origin=\"lower\", cmap=plt.cm.Pastel1,\n        extent=(mesh_x.min(), mesh_x.max(), mesh_y.min(), mesh_y.max()),\n    )\n\n    ax.plot(trans[:, 0], trans[:, 1], \"k.\", markersize=2)\n    ax.set_title(\"KMeans clustering of PCA data\")\n    ax.set_xlim(x_min, x_max)\n    ax.set_ylim(y_min, y_max)\n    ax.set_xticks(())\n    ax.set_yticks(())\n    return ax\n\n\n# + jupyter={\"outputs_hidden\": false}\nkmeans11 = KMeans(n_clusters=11).fit(trans)\nplot_kmeans_pca(trans, kmeans11)\n# -\n\nkmeans4 = KMeans(n_clusters=4).fit(trans)\nplot_kmeans_pca(trans, kmeans4)\n\npca_predict = my_pca.transform([predict_case])\nkmeans4.predict(pca_predict)\n\nlast_train = ind_order[-2]\nlast_train, ind_pop[last_train]\n\nkmeans4.predict(trans)[0]\n\n\n"
  },
  {
    "path": "Chapter10/Decision_Tree.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# + jupyter={\"outputs_hidden\": false}\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import tree\n\n# + [markdown] jupyter={\"outputs_hidden\": false}\n# http://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+%28diagnostic%29\n\n# + jupyter={\"outputs_hidden\": false}\n# !wget http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data\n# !wget http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.names\n# -\n\n# ## With scikit-learn\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('breast-cancer-wisconsin.data')\nw = open('clean.data', 'w')\nfor line in f:\n    if line.find('?') > -1:\n        continue\n    w.write(line)\nf.close()\nw.close()\n\n# + jupyter={\"outputs_hidden\": false}\ncolumn_names = [\n    'sample_id', 'clump_thickness', 'uniformity_cell_size',\n    'uniformity_cell shape', 'marginal_adhesion',\n    'single_epithelial_cell_size', 'bare_nuclei',\n    'bland_chromatin', 'normal_nucleoli', 'mitoses',\n    'class'\n]\nsamples = pd.read_csv('clean.data', header=None, names=column_names, index_col=0)\nsamples\n\n# + jupyter={\"outputs_hidden\": false}\ntraining_input = samples.iloc[:,:-1]\ntarget = samples.iloc[:,-1].apply(lambda x: 0 if x == 2 else 1)\n\n# + jupyter={\"outputs_hidden\": false}\nclf = tree.DecisionTreeClassifier(max_depth=3)\n\n# + jupyter={\"outputs_hidden\": false}\nclf.fit(training_input, target)\n\n# + jupyter={\"outputs_hidden\": false}\nimportances = pd.Series(\n    clf.feature_importances_ * 100,\n    
index=training_input.columns).sort_values(ascending=False)\nimportances\n\n# + jupyter={\"outputs_hidden\": false}\n100 * clf.score(training_input, target)\n\n# + jupyter={\"outputs_hidden\": false}\nfig, ax = plt.subplots(1, dpi=300)\ntree.plot_tree(clf,ax=ax, feature_names=training_input.columns, class_names=['Benign', 'Malignant'])\n# -\n\n\n\n\n"
  },
  {
    "path": "Chapter10/PCA.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# + jupyter={\"outputs_hidden\": false}\nimport os\n\nfrom sklearn.decomposition import PCA\nimport numpy as np\n\nfrom genomics.popgen.pca import plot\n# -\n\n# ## Meta-data load\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('../Chapter06/relationships_w_pops_041510.txt')\nind_pop = {}\nf.readline()  # header\nfor l in f:\n    toks = l.rstrip().split('\\t')\n    fam_id = toks[0]\n    ind_id = toks[1]\n    pop = toks[-1]\n    ind_pop['/'.join([fam_id, ind_id])] = pop\nf.close()\n# -\n\n# ## With scikit-learn\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('../Chapter06/hapmap10_auto_noofs_ld_12.ped')\nninds = 0\nind_order = []\nfor line in f:\n    ninds += 1\n    toks = line[:100].replace(' ', '\\t').split('\\t') #  for speed\n    fam_id = toks[0]\n    ind_id = toks[1]\n    ind_order.append('%s/%s' % (fam_id, ind_id))\nnsnps = (len(line.replace(' ', '\\t').split('\\t')) - 6) // 2\nf.close()\n\n# + jupyter={\"outputs_hidden\": false}\npca_array = np.empty((ninds, nsnps), dtype=int)\nprint(pca_array.shape)\nf = open('../Chapter06/hapmap10_auto_noofs_ld_12.ped')\nfor ind, line in enumerate(f):\n    snps = line.replace(' ', '\\t').split('\\t')[6:]\n    for pos in range(len(snps) // 2):\n        a1 = int(snps[2 * pos])\n        a2 = int(snps[2 * pos + 1])\n        my_code = a1 + a2 - 2\n        pca_array[ind, pos] = my_code\nf.close()\n\n# + jupyter={\"outputs_hidden\": false}\nmy_pca = PCA(n_components=8)\nmy_pca.fit(pca_array)\ntrans = my_pca.transform(pca_array)\n#Memory required\n\n# + jupyter={\"outputs_hidden\": false}\nsc_ind_comp = {}\nfor i, ind_pca in enumerate(trans):\n    sc_ind_comp[ind_order[i]] = ind_pca\nplot.render_pca_eight(sc_ind_comp, 
cluster=ind_pop)\n\n# + jupyter={\"outputs_hidden\": false}\n\n"
  },
  {
    "path": "Chapter10/Random_Forest.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.14.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# + jupyter={\"outputs_hidden\": false}\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import export_graphviz\n\n# + [markdown] jupyter={\"outputs_hidden\": false}\n# http://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+%28diagnostic%29\n\n# + jupyter={\"outputs_hidden\": false}\n# !wget http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data\n# !wget http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.names\n# -\n\n# ## With scikit-learn\n\n# + jupyter={\"outputs_hidden\": false}\nf = open('breast-cancer-wisconsin.data')\nw = open('clean.data', 'w')\nfor line in f:\n    if line.find('?') > -1:\n        continue\n    w.write(line)\nf.close()\nw.close()\n\n# + jupyter={\"outputs_hidden\": false}\ncolumn_names = [\n    'sample_id', 'clump_thickness', 'uniformity_cell_size',\n    'uniformity_cell shape', 'marginal_adhesion',\n    'single_epithelial_cell_size', 'bare_nuclei',\n    'bland_chromatin', 'normal_nucleoli', 'mitoses',\n    'class'\n]\nsamples = pd.read_csv('clean.data', header=None, names=column_names, index_col=0)\nsamples\n\n# + jupyter={\"outputs_hidden\": false}\ntrainning_input = samples.iloc[:,:-1]\ntarget = samples.iloc[:,-1]\n\n# + jupyter={\"outputs_hidden\": false}\nclf = RandomForestClassifier(max_depth=3, n_estimators=200)\n\n# + jupyter={\"outputs_hidden\": false}\nclf.fit(trainning_input, target)\n\n# + jupyter={\"outputs_hidden\": false}\nimportances = pd.Series(\n    clf.feature_importances_ * 100,\n    
index=trainning_input.columns).sort_values(ascending=False)\nimportances\n# -\n\n100 * clf.score(trainning_input, target)\n\n\n\nfor test_size in [0.01, 0.1, 0.2, 0.5, 0.8, 0.9, 0.99]:\n    X_train, X_test, y_train, y_test = train_test_split(\n        trainning_input, target, test_size=test_size)\n    tclf = RandomForestClassifier(max_depth=3)\n    tclf.fit(X_train, y_train)\n    score = tclf.score(X_test, y_test)\n    print(f'{1 - test_size:.1%} {score:.2%}')\n# Random number generator\n\n\n"
  },
  {
    "path": "Chapter11/.gitignore",
    "content": "dask-worker-space\ndata\nmydask.png\nx.png"
  },
  {
    "path": "Chapter11/Dask_Distributed.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# +\n#import dask\n#from dask.base import get_scheduler\n#import dask.array as da\n#\n#mosquito = da.from_zarr('data/AG1000G-AO/2L/calldata/GT')\n#print(get_scheduler(collections=[mosquito]).__module__) \n\n# +\nimport zarr\nimport dask.dataframe as dd\nfrom dask.distributed import Client\n\n#client = Client('127.0.0.1:8786')\nclient = Client()\nclient\n\n# +\nimport numpy as np\nimport dask.array as da\n\nmosquito = da.from_zarr('data/AG1000G-AO/2L/calldata/GT')\n# -\n\nmosquito\n\nmosquito.shape[0]\n\nmosquito = mosquito.rechunk((mosquito.shape[0]//8, 81, 2))\n\nmosquito = mosquito.persist()\n\nmosquito.visualize()\n\nmosquito\n\nmosquito.chunks\n\n\ndef calc_stats(my_chunk):\n    num_miss = np.sum(np.equal(my_chunk[0][0][:,:,0], -1), axis=1)\n    return num_miss\n\n\nstats = da.blockwise(calc_stats, 'i', mosquito, 'ijk', dtype=np.uint8)\n\nstats.visualize()\n\nstat_results = stats.compute()\n\nstat_results\n\n\n"
  },
  {
    "path": "Chapter11/Dask_Intro.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\nimport zarr\n\nmosquito = zarr.open('data/AG1000G-AO/2L/calldata/GT')\nmosquito\nzarr.array(mosquito, chunks=(1 + 48525747 // 4, 81, 2), store='data/rechunk')\n\nmosquito = zarr.open('data/rechunk')\nmosquito.chunks\n\n# +\nimport numpy as np\nimport dask.array as da\n\nmosquito = da.from_zarr('data/rechunk')\n#mosquito = da.from_zarr('data/AG1000G-AO/2L/calldata/GT')\n# ^^^ load array\n# -\n\nmosquito\n\nprint(mosquito[0])\n\nmosquito[0].compute()\n\nmosquito.visualize(rankdir='TB')\n\n\ndef calc_stats(variant):\n    variant = variant.reshape(variant.shape[0] // 2, 2)\n    num_misses = np.sum(np.equal(variant, -1)) // 2\n    return num_misses\n\n\nmosquito_2d = mosquito.reshape(mosquito.shape[0], mosquito.shape[1] * mosquito.shape[2])\nmosquito_2d.visualize(rankdir='TB')\n\nmosquito_2d\n\nmax_pos = 10000000\nstats = da.apply_along_axis(\n    calc_stats, 1, mosquito_2d[:max_pos,:],\n    shape=(max_pos,), dtype=np.int64)\n\nstats.visualize('x.png',rankdir='TB')\n\na = stats.compute()\n\na\n\n\n"
  },
  {
    "path": "Chapter11/MP_intro.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Downloading data\n\n# https://malariagen.github.io/vector-data/ag3/download.html\n# !mkdir -p data/AG1000G-AO/\n# !gsutil -m rsync -r \\\n#         -x '.*/calldata/(AD|GQ|MQ)/.*' \\\n#         gs://vo_agam_release/v3/snp_genotypes/all/AG1000G-AO/ \\\n#         data/AG1000G-AO/ > /dev/null\n\n# !mkdir -p data/metadata/\n# !gsutil -m rsync -r gs://vo_agam_release/v3/metadata/ data/metadata/\n\n# # BLA\n\n# +\nimport numpy as np\nimport zarr\n\nmosquito = zarr.open('data/AG1000G-AO')\nprint(mosquito.tree())\n\ngt_2l = mosquito['/2L/calldata/GT']\ngt_2l.info\n\ndir(gt_2l)\ngt_2l.shape[0]\n\n# +\nfrom math import ceil\nfrom multiprocessing import Pool\n\n\ndef calc_stats(my_chunk):\n    num_miss = np.sum(np.equal(my_chunk[:,:,0], -1), axis=1)\n    num_anc_hom = np.sum(\n        np.all([\n            np.equal(my_chunk[:,:,0], 0),\n            np.equal(my_chunk[:,:,0], my_chunk[:,:,1])], axis=0), axis=1)\n    num_het = np.sum(\n        np.not_equal(\n            my_chunk[:,:,0],\n            my_chunk[:,:,1]), axis=1)\n    return num_miss, num_anc_hom, num_het\n\n\nchunk_pos_size = gt_2l.chunks[0]\nmax_pos = gt_2l.shape[0]\n\n\nintervals = []\nfor chunk_pos in range(ceil(max_pos / chunk_pos_size)):\n    start_pos = chunk_pos * chunk_pos_size\n    end_pos = min(max_pos + 1, (chunk_pos + 1) * chunk_pos_size)\n    intervals.append((start_pos, end_pos))\n\n\ndef compute_interval(interval):\n    start_pos, end_pos = interval\n    my_chunk = gt_2l[start_pos:end_pos, :, :]\n    num_samples = my_chunk.shape[1]\n    num_miss, num_anc_hom, num_het = calc_stats(my_chunk)\n    chunk_complete_data = np.sum(np.equal(num_miss, 0))\n    chunk_more_anc_hom = np.sum(num_anc_hom > 
num_het)\n    return chunk_complete_data, chunk_more_anc_hom\n\n\nwith Pool() as p:\n    print(p)\n    chunk_returns = p.map(compute_interval, intervals)\n    complete_data = sum(map(lambda x: x[0], chunk_returns))\n    more_anc_hom = sum(map(lambda x: x[1], chunk_returns))\n    \n    print(complete_data, more_anc_hom)\n# -\n\n\n"
  },
  {
    "path": "Chapter11/Zarr_Intro.py",
    "content": "# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#       jupytext_version: 1.13.0\n#   kernelspec:\n#     display_name: Python 3 (ipykernel)\n#     language: python\n#     name: python3\n# ---\n\n# # Downloading data\n\n# https://malariagen.github.io/vector-data/ag3/download.html\n# !mkdir -p data/AG1000G-AO/\n# !gsutil -m rsync -r \\\n#         -x '.*/calldata/(AD|GQ|MQ)/.*' \\\n#         gs://vo_agam_release/v3/snp_genotypes/all/AG1000G-AO/ \\\n#         data/AG1000G-AO/ > /dev/null\n\n# !mkdir -p data/metadata/\n# !gsutil -m rsync -r gs://vo_agam_release/v3/metadata/ data/metadata/\n\n# # BLA\n\n# +\nimport numpy as np\nimport zarr\n\nmosquito = zarr.open('data/AG1000G-AO')\nprint(mosquito.tree())\n# -\n\nmosquito['samples']\n\nnp.array(mosquito['samples'])\n\ngt_2l = mosquito['/2L/calldata/GT']\ngt_2l\ngt_2l.info\n\ngt_2l[400000,:,:]\n\n# +\n# Do not do np.array(gt_2l)\n# -\n\ndir(gt_2l)\ngt_2l.shape[0]\n\n# +\nfrom math import ceil\n\nchunk_pos_size = gt_2l.chunks[0]\nmax_pos = gt_2l.shape[0]\n\n\ndef calc_stats(my_chunk):\n    num_miss = np.sum(np.equal(my_chunk[:,:,0], -1), axis=1)\n    num_anc_hom = np.sum(\n        np.all([\n            np.equal(my_chunk[:,:,0], 0),\n            np.equal(my_chunk[:,:,0], my_chunk[:,:,1])], axis=0), axis=1)\n    num_het = np.sum(\n        np.not_equal(\n            my_chunk[:,:,0],\n            my_chunk[:,:,1]), axis=1)\n    return num_miss, num_anc_hom, num_het\n\n\ncomplete_data = 0\nmore_anc_hom = 0\ntotal_pos = 0\nfor chunk_pos in range(ceil(max_pos / chunk_pos_size)):\n    start_pos = chunk_pos * chunk_pos_size\n    end_pos = min(max_pos + 1, (chunk_pos + 1) * chunk_pos_size)\n    my_chunk = gt_2l[start_pos:end_pos, :, :]\n    #print(start_pos, end_pos, my_chunk.shape)\n    num_samples = my_chunk.shape[1]\n    num_miss, num_anc_hom, num_het = calc_stats(my_chunk)\n    chunk_complete_data = 
np.sum(np.equal(num_miss, 0))\n    #print(end_pos - start_pos, my_chunk.shape, num_anc_hom.shape, num_het.shape)\n    chunk_more_anc_hom = np.sum(num_anc_hom > num_het)\n    print(np.sum(num_anc_hom > num_het))\n    complete_data += chunk_complete_data\n    more_anc_hom += chunk_more_anc_hom\n    total_pos += (end_pos - start_pos)\nprint(complete_data, more_anc_hom, total_pos)\n# -\n\n\n"
  },
  {
    "path": "Chapter12/Builtin.py",
    "content": "import functools\n\n\n@functools.cache\ndef fibo(n):\n    if n == 0:\n        return 0\n    if n == 1:\n        return 1\n    return fibo(n - 1) + fibo(n - 2)\n\n\nfibo(1000)\n\n\ndef gene_min_reads(source, min_reads):\n    return map(\n        lambda x: x[0],\n        filter(\n            lambda x: x[1] >= min_reads,\n            source.items()))\n\n\nlist(gene_min_reads({'LCT': 10, 'MRAP2': 1}, 2))\n\n\nmultiplication = lambda x, y: x * y\n\ndouble = functools.partial(multiplication, 2)\n\ndouble(3)\n"
  },
  {
    "path": "Chapter12/Lazy.py",
    "content": "import pandas as pd\n\n\ndef load(file_name):\n    df = pd.read_csv(file_name).set_index('gene')\n    return dict(df['count'])\n\n\ndef get_min_reads(all_data, min_reads):\n    return {\n        gene: count\n        for gene, count in all_data.items()\n        if count >= min_reads\n    }\n\n\ndef has_min_observations(subset_data, min_observations):\n    return len(subset_data) >= min_observations\n\n\nprint(has_min_observations(\n    get_min_reads(\n        load('my_genes.csv'), 4\n    ), 3))\n\n\ndef get_rec(file_name):\n    with open(file_name) as f:\n        f.readline()  # header\n        for line in f:\n            toks = line.strip().split(',')\n            yield toks[0], int(toks[1])\n\n\ndef gene_min_reads(source, min_reads):\n    for gene, count in source:\n        if count >= min_reads:\n            yield gene\n\n\ndef gene_min_observations(subset_source, min_observations):\n    my_observations = 0\n    for gene in subset_source:\n        my_observations += 1\n        if my_observations == min_observations:\n            return True\n    return False\n\n\nprint(gene_min_observations(\n    gene_min_reads(\n        get_rec('my_genes.csv'), 4\n    ), 2))\n"
  },
  {
    "path": "Chapter12/Mutability.py",
    "content": "import shutil\nimport pandas as pd\n\n\ndef restore_db(file_name):\n    shutil.copyfile(f'{file_name}.base', file_name)\n\n\ndef load(file_name):\n    df = pd.read_csv(file_name).set_index('gene')\n    return dict(df['count'])\n\n\ndef save(dict_db, file_name):\n    pd.Series(dict_db).to_csv(\n        file_name, index_label='gene', header=['count'])\n\n\ndef add_sample_dict(dict_db, gene_list):\n    for gene in gene_list:\n        dict_db[gene] = dict_db.get(gene, 0) + 1\n\n\ndef add_sample_new_dict(dict_db, gene_list):\n    my_dict_db = dict(dict_db)  # next recipe\n    for gene in gene_list:\n        my_dict_db[gene] = my_dict_db.get(gene, 0) + 1\n    return my_dict_db\n\n\ngene_count = load('my_genes.csv')\n\nadd_sample_dict(gene_count, ['DEPP'])\n\nnew_gene_count = add_sample_new_dict(gene_count, ['DEPP'])\n"
  },
  {
    "path": "Chapter12/Persistence1.py",
    "content": "import shutil\nimport pandas as pd\n\n\ndef restore_db(file_name):\n    shutil.copyfile(f'{file_name}.base', file_name)\n\n\ndef load(file_name):\n    df = pd.read_csv(file_name).set_index('gene')\n    return dict(df['count'])\n\n\ndef save(dict_db, file_name):\n    pd.Series(dict_db).to_csv(\n        file_name, index_label='gene', header=['count'])\n\n\ndef add_sample_csv(gene_list):\n    gene_count = load('my_genes.csv')\n    for gene in gene_list:\n        gene_count[gene] = gene_count.get(gene, 0) + 1\n    save(gene_count, 'my_genes.csv')\n\n\nrestore_db('my_genes.csv')\n\nadd_sample_csv(['MC4R', 'TYR'])\nadd_sample_csv(['LCT', 'HLA-A'])\nadd_sample_csv(['HLA-B', 'HLA-C'])\n"
  },
  {
    "path": "Chapter12/Persistence2.py",
    "content": "import shutil\nimport pandas as pd\n\n\ndef restore_db(file_name):\n    shutil.copyfile(f'{file_name}.base', file_name)\n\n\ndef load(file_name):\n    df = pd.read_csv(file_name).set_index('gene')\n    return dict(df['count'])\n\n\ndef save(dict_db, file_name):\n    pd.Series(dict_db).to_csv(\n        file_name, index_label='gene', header=['count'])\n\n\ndef add_sample_new_dict(dict_db, gene_list):\n    my_dict_db = dict(dict_db)  # next recipe\n    for gene in gene_list:\n        my_dict_db[gene] = my_dict_db.get(gene, 0) + 1\n    return my_dict_db\n\n\nrestore_db('my_genes.csv')\n\ngene_count = load('my_genes.csv')\ngene_count = add_sample_new_dict(gene_count, ['MC4R', 'TYR'])\ngene_count = add_sample_new_dict(gene_count, ['LCT', 'HLA-A'])\ngene_count = add_sample_new_dict(gene_count, ['HLA-B', 'HLA-C'])\nsave(gene_count, 'my_genes.csv')\n"
  },
  {
    "path": "Chapter12/Pure.py",
    "content": "import shutil\nimport pandas as pd\n\n\ndef restore_db(file_name):\n    shutil.copyfile(f'{file_name}.base', file_name)\n\n\ndef load(file_name):\n    df = pd.read_csv(file_name).set_index('gene')\n    return dict(df['count'])\n\n\ndef save(dict_db, file_name):\n    pd.Series(dict_db).to_csv(\n        file_name, index_label='gene', header=['count'])\n\n\ndef add_sample_csv(gene_list):\n    gene_count = load('my_genes.csv')\n    for gene in gene_list:\n        gene_count[gene] = gene_count.get(gene, 0) + 1\n    save(gene_count, 'my_genes.csv')\n\n\ndef add_sample_global_dict(gene_list):\n    global gene_count\n    for gene in gene_list:\n        gene_count[gene] = gene_count.get(gene, 0) + 1\n\n\ndef add_sample_dict(dict_db, gene_list):\n    for gene in gene_list:\n        dict_db[gene] = dict_db.get(gene, 0) + 1\n\n\ngene_count = load('my_genes.csv')\n\n\nadd_sample_csv(['MC4R', 'TYR'])\n\nadd_sample_dict(gene_count, ['MC4R', 'TYR'])\n\n\nsave(gene_count, 'my_genes.csv')\n"
  },
  {
    "path": "Chapter12/Recursion.py",
    "content": "def fibo_iter(n):\n    if n < 2:\n        return n\n    last = 1\n    second_last = 0\n    for _i in range(2, n + 1):\n        result = second_last + last\n        second_last = last\n        last = result\n    return result\n\n\ndef fibo_naive(n):\n    if n == 0:\n        return 0\n    if n == 1:\n        return 1\n    return fibo_naive(n - 1) + fibo_naive(n - 2)\n\n\nfibo_iter(0)\nfibo_iter(1)\nfibo_iter(2)\nfibo_iter(3)\nfibo_iter(4)\nfibo_iter(5)\nfibo_iter(6)\nfibo_naive(1000)\n\n\ndef factorial(n):\n    if n == 1:\n        return 1\n    return n * factorial(n - 1)\n\n\nfactorial(5)\nfactorial(20000)\n"
  },
  {
    "path": "Chapter12/Tools.py",
    "content": "import functools\n\n\ndef fibo_iter(n):\n    if n == 0:\n        return 0\n    if n == 1:\n        return 1\n    last = 1\n    second_last = 0\n    for i in range(2, n + 1):\n        result = second_last + last\n        second_last = last\n        last = result\n    return result\n\n\ndef fibo_naive(n):\n    if n == 0:\n        return 0\n    if n == 1:\n        return 1\n    return fibo_naive(n - 1) + fibo_naive(n - 2)\n\n\n@functools.lru_cache\ndef fibo(n):\n    if n == 0:\n        return 0\n    if n == 1:\n        return 1\n    return fibo(n - 1) + fibo(n - 2)\n\n\ntime fibo_iter(100)\n#time fibo_naive(1000)\ntime fibo(1000)\n\n\ndef factorial(n):\n    if n == 1:\n        return 1\n    return n * factorial(n - 1)\n\n\nfactorial(20000)\n"
  },
  {
    "path": "Chapter12/my_genes.csv",
    "content": "gene,count\nLCT,5\nLEPR,4\nMRAP2,1"
  },
  {
    "path": "Chapter12/my_genes.csv.base",
    "content": "gene,count\nLCT,5\nLEPR,4\nMRAP2,1"
  },
  {
    "path": "Datasets.py",
    "content": "\n# # Datasets for the book\n#\n# Here we provide links to the datasets used in the book.\n#\n# Important Notes:\n#\n# 1. Note that these datasets are provided on external servers by third parties\n\n# # Python and the Surrounding Software Ecology\n#\n# ## R sections\n#\n# http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/20130502.phase3.sequence.index\n#\n\n# # PDB\n#\n# \n# ## Parsing mmCIF files with Biopython\n#\n# [1TUP.cif](http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=cif&compression=NO&structureId=1TUP)\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021 Packt\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "\n\n# Bioinformatics-with-Python-Cookbook-third-edition\n\n<a href=\"https://www.packtpub.com/product/bioinformatics-with-python-cookbook-third-edition/9781803236421\"><img src=\"https://static.packt-cdn.com/products/9781803236421/cover/smaller\" alt=\"Bioinformatics with Python Cookbook - Third Edition\" height=\"256px\" align=\"right\"></a>\n\nThis is the code repository for [Bioinformatics with Python Cookbook - Third Edition](https://www.packtpub.com/product/bioinformatics-with-python-cookbook-third-edition/9781803236421), published by Packt.\n\n**Use modern Python libraries and applications to solve real-world computational biology problems**\n\n## What is this book about?\nBioinformatics is an active research field that uses a range of simple-to-advanced computations to extract valuable information from biological data, and this book will show you how to manage these tasks using Python.\n\nThis updated third edition of the Bioinformatics with Python Cookbook begins with a quick overview of the various tools and libraries in the Python ecosystem that will help you convert, analyze, and visualize biological datasets. Next, you'll cover key techniques for next-generation sequencing, single-cell analysis, genomics, metagenomics, population genetics, phylogenetics, and proteomics with the help of real-world examples. You'll learn how to work with important pipeline systems, such as Galaxy servers and Snakemake, and understand the various modules in Python for functional and asynchronous programming. This book will also help you explore topics such as SNP discovery using statistical approaches under high-performance computing frameworks, including Dask and Spark. 
In addition to this, you’ll explore the application of machine learning algorithms in bioinformatics.\n\nBy the end of this bioinformatics Python book, you'll be equipped with the knowledge you need to implement the latest programming techniques and frameworks, empowering you to deal with bioinformatics data on every scale.\n\nThis book covers the following exciting features: \n* Become well-versed with data processing libraries such as NumPy, pandas, arrow, and zarr in the context of bioinformatic analysis\n* Interact with genomic databases\n* Solve real-world problems in the fields of population genetics, phylogenetics, and proteomics\n* Build bioinformatics pipelines using a Galaxy server and Snakemake\n* Work with functools and itertools for functional programming\n* Perform parallel processing with Dask on biological data\n* Explore principal component analysis (PCA) techniques with scikit-learn\n\nIf you feel this book is for you, get your [copy](https://www.amazon.in/Bioinformatics-Python-Cookbook-bioinformatics-computational/dp/1789344697/ref=sr_1_2?keywords=Bioinformatics+with+Python+Cookbook+-+Third+Edition&qid=1665382032&sr=8-2) today!\n\n<a href=\"https://www.packtpub.com/product/bioinformatics-with-python-cookbook-third-edition/9781803236421\"><img src=\"https://raw.githubusercontent.com/PacktPublishing/GitHub/master/GitHub.png\" alt=\"https://www.packtpub.com/\" border=\"5\" /></a>\n\n## Instructions and Navigations\nAll of the code is organized into folders.\n\nThe code will look like the following:\n```\nfrom Bio import SeqIO\ngenome_name = 'PlasmoDB-9.3_Pfalciparum3D7_Genome.fasta'\nrecs = SeqIO.parse(genome_name, 'fasta')\nfor rec in recs:\nprint(rec.description)\n```\n**Following is what you need for this book:**\nThis book is for bioinformatics analysts, data scientists, computational biologists, researchers, and Python developers who want to address intermediate-to-advanced biological and bioinformatics problems. 
Working knowledge of the Python programming language is expected. Basic knowledge of biology will also be helpful.\n\nWith the following software and hardware list you can run all code files present in the book (Chapter 1-12).\n\n### Software and Hardware List\n\n| Chapter  | Software required                                                                    | OS required                        |\n| -------- | -------------------------------------------------------------------------------------| -----------------------------------|\n|  \t1-12\t   | Python 3.9                             \t\t\t  | Any OS | \t\t\n|  \t1-12\t   | Numpy, Pandas and Matplotlib                             \t\t\t  | Any OS | \t\t\n|  \t1-12\t   | BioPython                             \t\t\t  | Any OS | \t\t\n|  \t1-12\t   | Dask, Zarr, Scikit-learn                             \t\t\t  | Any OS | \t\t\n\nWe also provide a PDF file that has color images of the screenshots/diagrams used in this book. [Click here to download it](https://packt.link/3KQQO).\n\n  \n## Get to Know the Author\n**Tiago Antao** is a bioinformatician who is currently working in the field of genomics. A former computer scientist, Tiago moved into computational biology with an MSc in bioinformatics from the Faculty of Sciences at the University of Porto, Portugal, and a PhD on the spread of drug-resistant malaria from the Liverpool School of Tropical Medicine, UK. After his doctorate, Tiago worked with human datasets at the University of Cambridge, UK and with mosquito whole-genome sequencing data at the University of Oxford, UK, before helping to set up the bioinformatics infrastructure at the University of Montana, USA. He currently works as a data engineer in the biotechnology field in Boston, MA. 
He is one of the co-authors of Biopython, a major bioinformatics package written in Python.\n### Download a free PDF\n\n <i>If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost.<br>Simply click on the link to claim your free PDF.</i>\n<p align=\"center\"> <a href=\"https://packt.link/free-ebook/9781803236421\">https://packt.link/free-ebook/9781803236421 </a> </p>\n"
  },
  {
    "path": "Welcome.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"3040340b-bd0b-4266-a7a6-8b48d9a94625\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Python for Bioinformatics\\n\",\n    \"\\n\",\n    \"## Datasets\\n\",\n    \"\\n\",\n    \"[Click here](Datasets.py) for the datasets used in the book. You only need this if you do not use the notebooks (as the notebooks will take care of the data)\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"ee3697db-cdfe-41c2-ae06-8dc1633b5701\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Python and the surrounding software ecology\\n\",\n    \"\\n\",\n    \"- [Interfacing with R](Chapter01/Interfacing_R.py)\\n\",\n    \"- [R Magic](Chapter01/R_magic.py)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"5b2663bc-8efe-4bb0-9ac5-f9e2eb09cc5e\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": []\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"\",\n   \"name\": \"\"\n  },\n  \"language_info\": {\n   \"name\": \"\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docker/Chapter01/Dockerfile",
    "content": "FROM tiagoantao/bio3\nMAINTAINER Tiago Antao <tiago@tiago.org>\n# RUN conda create -n bioinformatics_r --clone bioinformatics_base\n\n#RUN conda install -n bioinformatics_r r-base=4.1.3 r-ggplot2=3.3.5 r-lazyeval=0.2.2 r-gridextra=2.3 rpy2\nRUN conda create -n bioinformatics_r jupyterlab jupytext pandas\nRUN conda install -n bioinformatics_r r-base r-ggplot2 r-lazyeval r-gridextra rpy2\nCMD conda run --no-capture-output -n bioinformatics_r jupyter-lab --ip=0.0.0.0 --no-browser --allow-root --port=9875 --NotebookApp.token='' --NotebookApp.password=''\n"
  },
  {
    "path": "docker/main/Dockerfile",
    "content": "FROM continuumio/anaconda3:2021.05\nMAINTAINER Tiago Antao <tiago@tiago.org>\n#ENV DEBIAN_FRONTEND noninteractive\n\n#RUN apt-get update && apt-get upgrade -y && apt-get install -y git wget build-essential unzip graphviz libgraphviz-dev pkg-config swig libx11-dev libgsl0-dev libopenblas-dev liblapacke-dev\n#RUN apt-get install -y samtools mafft muscle raxml tabix\n\nRUN git clone https://github.com/PacktPublishing/Bioinformatics-with-Python-Cookbook-third-Edition.git\n\n#RUN conda upgrade -n base conda\nRUN conda config --add channels conda-forge\nRUN conda config --add channels bioconda\nRUN conda create -n bioinformatics_base --file /Bioinformatics-with-Python-Cookbook-third-Edition/Chapter01/bioinformatics_base.txt\nRUN pip install pyarrow==8.0.0\nRUN conda init bash\n\nEXPOSE 9875\n\nWORKDIR /Bioinformatics-with-Python-Cookbook-third-Edition\n\nRUN echo setterm -foreground magenta >> /etc/bash.bashrc\nCMD conda run --no-capture-output -n bioinformatics_base jupyter-lab --ip=0.0.0.0 --no-browser --allow-root --port=9875 --NotebookApp.token='' --NotebookApp.password=''\n"
  }
]