[
  {
    "path": ".gitmodules",
    "content": "[submodule \"submodules/cp-p\"]\n\tpath = submodules/cp-p\n\turl = ../cp-p.git\n"
  },
  {
    "path": "Colours-EyeCandy/colourbars",
    "content": "#!/bin/sh\n# by Him on the Arch boards\n# ANSI Color -- use these variables to easily have different color\n#    and format output. Make sure to output the reset sequence after \n#    colors (f = foreground, b = background), and use the 'off'\n#    feature for anything you turn on.\n\ninitializeANSI()\n{\n  esc=\"\u001b\"\n\n  blackf=\"${esc}[30m\";   redf=\"${esc}[31m\";    greenf=\"${esc}[32m\"\n  yellowf=\"${esc}[33m\"   bluef=\"${esc}[34m\";   purplef=\"${esc}[35m\"\n  cyanf=\"${esc}[36m\";    whitef=\"${esc}[37m\"\n  \n  blackb=\"${esc}[40m\";   redb=\"${esc}[41m\";    greenb=\"${esc}[42m\"\n  yellowb=\"${esc}[43m\"   blueb=\"${esc}[44m\";   purpleb=\"${esc}[45m\"\n  cyanb=\"${esc}[46m\";    whiteb=\"${esc}[47m\"\n\n  boldon=\"${esc}[1m\";    boldoff=\"${esc}[22m\"\n  italicson=\"${esc}[3m\"; italicsoff=\"${esc}[23m\"\n  ulon=\"${esc}[4m\";      uloff=\"${esc}[24m\"\n  invon=\"${esc}[7m\";     invoff=\"${esc}[27m\"\n\n  reset=\"${esc}[0m\"\n}\n\n# note in this first use that switching colors doesn't require a reset\n# first - the new color overrides the old one.\n\ninitializeANSI\n\ncat << EOF\n\n ${redf}▆▆▆▆▆▆▆▆▆▆${reset} ${greenf}▆▆▆▆▆▆▆▆▆▆${reset} ${yellowf}▆▆▆▆▆▆▆▆▆▆${reset} ${bluef}▆▆▆▆▆▆▆▆▆▆${reset} ${purplef}▆▆▆▆▆▆▆▆▆▆${reset} ${cyanf}▆▆▆▆▆▆▆▆▆▆${reset} ${whitef}▆▆▆▆▆▆▆▆▆▆${reset}\n ${boldon}${blackf} ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::${reset}\n ${boldon}${redf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${greenf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${yellowf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${bluef}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${purplef}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${cyanf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${whitef}▆▆▆▆▆▆▆▆▆▆${reset}\n\n\nEOF\n"
  },
  {
    "path": "Colours-EyeCandy/colours",
    "content": "#!/bin/bash\n# Original: http://frexx.de/xterm-256-notes/\n#           http://frexx.de/xterm-256-notes/data/colortable16.sh\n# Modified by Aaron Griffin\n# and further by Kazuo Teramoto\n\n\nFGNAMES=(' black ' '  red  ' ' green ' ' yellow' '  blue ' 'magenta' '  cyan ' ' white ')\nBGNAMES=('DFT' 'BLK' 'RED' 'GRN' 'YEL' 'BLU' 'MAG' 'CYN' 'WHT')\necho \"     ┌──────────────────────────────────────────────────────────────────────────┐\"\nfor b in $(seq 0 8); do\n    if [ \"$b\" -gt 0 ]; then\n      bg=$(($b+39))\n    fi\n\n    echo -en \"\\033[0m ${BGNAMES[$b]} │ \"\n    for f in $(seq 0 7); do\n      echo -en \"\\033[${bg}m\\033[$(($f+30))m ${FGNAMES[$f]} \"\n    done\n    echo -en \"\\033[0m │\"\n\n    echo -en \"\\033[0m\\n\\033[0m     │ \"\n    for f in $(seq 0 7); do\n      echo -en \"\\033[${bg}m\\033[1;$(($f+30))m ${FGNAMES[$f]} \"\n    done\n    echo -en \"\\033[0m │\"\n        echo -e \"\\033[0m\"\n        \n  if [ \"$b\" -lt 8 ]; then\n    echo \"     ├──────────────────────────────────────────────────────────────────────────┤\"\n  fi\ndone\necho \"     └──────────────────────────────────────────────────────────────────────────┘\"\n\n"
  },
  {
    "path": "Colours-EyeCandy/colourtheme",
    "content": "#!/bin/bash\n#\n#   This file echoes a bunch of color codes to the \n#   terminal to demonstrate what's available.  Each \n#   line is the color code of one forground color,\n#   out of 17 (default + 16 escapes), followed by a \n#   test use of that color on all nine background \n#   colors (default + 8 escapes).\n#\n\nT='▆ ▆'   # The test text\n\necho -e \"\\n                 40m     41m     42m     43m\\\n     44m     45m     46m     47m\";\n\nfor FGs in '    m' '   1m' '  30m' '1;30m' '  31m' '1;31m' '  32m' \\\n           '1;32m' '  33m' '1;33m' '  34m' '1;34m' '  35m' '1;35m' \\\n           '  36m' '1;36m' '  37m' '1;37m';\n  do FG=${FGs// /}\n  echo -en \" $FGs \\033[$FG  $T  \"\n  for BG in 40m 41m 42m 43m 44m 45m 46m 47m;\n    do echo -en \"$EINS \\033[$FG\\033[$BG  $T  \\033[0m\";\n  done\n  echo;\ndone\necho\n"
  },
  {
    "path": "Colours-EyeCandy/hypnotoad.pl",
    "content": "#!/usr/bin/perl\n\n# script by karabaja4\n# mail: karabaja4@archlinux.us\n\nmy $blackFG_yellowBG = \"\\e[30;43m\";\nmy $blackFG_redBG = \"\\e[30;41m\";\nmy $blackFG_purpleBG = \"\\e[30;45m\";\n\nmy $yellowFG_blackBG = \"\\e[1;33;40m\";\nmy $yellowFG_redBG = \"\\e[1;33;41m\";\n\nmy $redFG_yellowBG = \"\\e[31;43m\";\n\nmy $purpleFG_yellowBG = \"\\e[35;43m\";\nmy $purpleFG_blueBG = \"\\e[1;35;44m\";\n\nmy $end = \"\\e[0m\";\n\nsystem(\"clear\");\n\nprint \"\n\n               ${blackFG_yellowBG},'${blackFG_redBG}`${blackFG_yellowBG}`.._${end}   ${blackFG_yellowBG},'${blackFG_redBG}`${end}${blackFG_yellowBG}`.${end}\n              ${blackFG_yellowBG}:${blackFG_redBG},${yellowFG_blackBG}--.${end}${blackFG_redBG}_${blackFG_yellowBG}:)\\\\,:${blackFG_redBG},${yellowFG_blackBG}._,${end}${yellowFG_redBG}.${end}${blackFG_yellowBG}:${end}\n              ${blackFG_yellowBG}:`-${yellowFG_blackBG}-${end}${blackFG_yellowBG},${blackFG_yellowBG}''${end}${redFG_yellowBG}@@\\@${end}${blackFG_yellowBG}:`.${yellowFG_redBG}.${end}${blackFG_yellowBG}.';\\\\${end}        All Glory to\n               ${blackFG_yellowBG}`,'${end}${redFG_yellowBG}@@@@@@\\@${end}${blackFG_yellowBG}`---'${redFG_yellowBG}@\\@${end}${blackFG_yellowBG}`.${end}     the HYPNOTOAD!\n               ${blackFG_yellowBG}/${redFG_yellowBG}@@@@@@@@@@@@@@@@\\@${end}${blackFG_yellowBG}:${end}\n              ${blackFG_yellowBG}/${redFG_yellowBG}@@@@@@@@@@@@@@@@@@\\@${end}${blackFG_yellowBG}\\\\${end}\n            ${blackFG_yellowBG},'${redFG_yellowBG}@@@@@@@@@@@@@@@@@@@@\\@${end}${purpleFG_yellowBG}:\\\\${end}${blackFG_yellowBG}.___,-.${end}\n           ${blackFG_yellowBG}`...,---'``````-..._${redFG_yellowBG}@@@\\@${end}${blackFG_purpleBG}|:${end}${redFG_yellowBG}@@@@@@\\@${end}${blackFG_yellowBG}\\\\${end}\n             ${blackFG_yellowBG}(                 )${end}${redFG_yellowBG}@@\\@${end}${blackFG_purpleBG};:${end}${redFG_yellowBG}@@@\\@)@@\\@${end}${blackFG_yellowBG}\\\\${end}  ${blackFG_yellowBG}_,-.${end}\n              ${blackFG_yellowBG}`.              (${end}${redFG_yellowBG}@@\\@${end}${blackFG_purpleBG}//${end}${redFG_yellowBG}@@@@@@@@@\\@${end}${blackFG_yellowBG}`'${end}${redFG_yellowBG}@@@\\@${end}${blackFG_yellowBG}\\\\${end}\n               ${blackFG_yellowBG}:               `.${end}${blackFG_purpleBG}//${end}${redFG_yellowBG}@@)@@@@@@)@@@@@,\\@${end}${blackFG_yellowBG};${end}\n               ${blackFG_purpleBG}|`${purpleFG_yellowBG}.${blackFG_yellowBG}            ${end}${purpleFG_yellowBG}_${end}${purpleFG_yellowBG},${blackFG_purpleBG}'/${end}${redFG_yellowBG}@@@@@@@)@@@@)@,'\\@${end}${blackFG_yellowBG},'${end}\n               ${blackFG_yellowBG}:${end}${blackFG_purpleBG}`.`${end}${purpleFG_yellowBG}-..____..=${end}${blackFG_purpleBG}:.-${end}${blackFG_yellowBG}':${end}${redFG_yellowBG}@@@@@.@@@@\\@_,@@,'${end}\n              ${redFG_yellowBG},'${end}${blackFG_yellowBG}\\\\ ${end}${blackFG_purpleBG}``--....${end}${purpleFG_blueBG}-)='${end}${blackFG_yellowBG}    `.${end}${redFG_yellowBG}_,@\\@${end}${blackFG_yellowBG}\\\\${end}    ${redFG_yellowBG})@@\\@'``._${end}\n             ${redFG_yellowBG}/\\@${end}${redFG_yellowBG}_${end}${redFG_yellowBG}\\@${end}${blackFG_yellowBG}`.${end}${blackFG_yellowBG}       ${end}${blackFG_redBG}(@)${end}${blackFG_yellowBG}      /${end}${redFG_yellowBG}@@@@\\@${end}${blackFG_yellowBG})${end}  ${redFG_yellowBG}; / \\\\ \\\\`-.'${end}\n            ${redFG_yellowBG}(@@\\@${end}${redFG_yellowBG}`-:${end}${blackFG_yellowBG}`.     
${end}${blackFG_yellowBG}`' ___..'${end}${redFG_yellowBG}@\\@${end}${blackFG_yellowBG}_,-'${end}   ${redFG_yellowBG}|/${end}   ${redFG_yellowBG}`.)${end}\n             ${redFG_yellowBG}`-. `.`.${end}${blackFG_yellowBG}``-----``--${end}${redFG_yellowBG},@\\@.'${end}\n               ${redFG_yellowBG}|/`.\\\\`'${end}        ${redFG_yellowBG},',');${end}\n                   ${redFG_yellowBG}`${end}         ${redFG_yellowBG}(/${end}  ${redFG_yellowBG}(/${end}\n\n\n\";\n\n\n"
  },
  {
    "path": "Colours-EyeCandy/pacman.sh",
    "content": "#!/bin/sh\n\n# ANSI Color -- use these variables to easily have different color\n#    and format output. Make sure to output the reset sequence after \n#    colors (f = foreground, b = background), and use the 'off'\n#    feature for anything you turn on.\n\ninitializeANSI()\n{\n esc=\"\u001b\"\n\n  blackf=\"${esc}[30m\";   redf=\"${esc}[31m\";    greenf=\"${esc}[32m\"\n  yellowf=\"${esc}[33m\"   bluef=\"${esc}[34m\";   purplef=\"${esc}[35m\"\n  cyanf=\"${esc}[36m\";    whitef=\"${esc}[37m\"\n  \n  blackb=\"${esc}[40m\";   redb=\"${esc}[41m\";    greenb=\"${esc}[42m\"\n  yellowb=\"${esc}[43m\"   blueb=\"${esc}[44m\";   purpleb=\"${esc}[45m\"\n  cyanb=\"${esc}[46m\";    whiteb=\"${esc}[47m\"\n\n  boldon=\"${esc}[1m\";    boldoff=\"${esc}[22m\"\n  italicson=\"${esc}[3m\"; italicsoff=\"${esc}[23m\"\n  ulon=\"${esc}[4m\";      uloff=\"${esc}[24m\"\n  invon=\"${esc}[7m\";     invoff=\"${esc}[27m\"\n\n  reset=\"${esc}[0m\"\n}\n\n# note in this first use that switching colors doesn't require a reset\n# first - the new color overrides the old one.\n\nclear \n\ninitializeANSI\n\ncat << EOF\n\n ${yellowf}  ▄███████▄${reset}   ${redf}  ▄██████▄${reset}    ${greenf}  ▄██████▄${reset}    ${bluef}  ▄██████▄${reset}    ${purplef}  ▄██████▄${reset}    ${cyanf}  ▄██████▄${reset}\n ${yellowf}▄█████████▀▀${reset}  ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄${reset}  ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄${reset}  ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄${reset}  ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄${reset}  ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}\n ${yellowf}███████▀${reset}      ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███${reset}  ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███${reset}  ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███${reset}  ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███${reset}  ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}\n ${yellowf}███████▄${reset}      ${redf}████████████${reset}  ${greenf}████████████${reset}  ${bluef}████████████${reset}  ${purplef}████████████${reset}  ${cyanf}████████████${reset}\n ${yellowf}▀█████████▄▄${reset}  ${redf}██▀██▀▀██▀██${reset}  ${greenf}██▀██▀▀██▀██${reset}  ${bluef}██▀██▀▀██▀██${reset}  ${purplef}██▀██▀▀██▀██${reset}  ${cyanf}██▀██▀▀██▀██${reset}\n ${yellowf}  ▀███████▀${reset}   ${redf}▀   ▀  ▀   ▀${reset}  ${greenf}▀   ▀  ▀   ▀${reset}  ${bluef}▀   ▀  ▀   ▀${reset}  ${purplef}▀   ▀  ▀   ▀${reset}  ${cyanf}▀   ▀  ▀   ▀${reset}\n \n ${boldon}${yellowf}  ▄███████▄   ${redf}  ▄██████▄    ${greenf}  ▄██████▄    ${bluef}  ▄██████▄    ${purplef}  ▄██████▄    ${cyanf}  ▄██████▄${reset}\n ${boldon}${yellowf}▄█████████▀▀  ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄  ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄  ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄  ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄  ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}\n ${boldon}${yellowf}███████▀      ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███  ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███  ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███  ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███  ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}\n ${boldon}${yellowf}███████▄      ${redf}████████████  ${greenf}████████████  ${bluef}████████████  ${purplef}████████████  ${cyanf}████████████${reset}\n 
${boldon}${yellowf}▀█████████▄▄  ${redf}██▀██▀▀██▀██  ${greenf}██▀██▀▀██▀██  ${bluef}██▀██▀▀██▀██  ${purplef}██▀██▀▀██▀██  ${cyanf}██▀██▀▀██▀██${reset}\n ${boldon}${yellowf}  ▀███████▀   ${redf}▀   ▀  ▀   ▀  ${greenf}▀   ▀  ▀   ▀  ${bluef}▀   ▀  ▀   ▀  ${purplef}▀   ▀  ▀   ▀  ${cyanf}▀   ▀  ▀   ▀${reset}\n\nEOF\n"
  },
  {
    "path": "Colours-EyeCandy/spacey.sh",
    "content": "#!/bin/bash\n#ANSI color scheme script featuring Space Invaders\n#\n# Original: http://crunchbanglinux.org/forums/post/126921/#p126921\n# Modified by lolilolicon\n \n\nf=3 b=4\nfor j in f b; do\n  for i in {0..7}; do\n    eval ${j}${i}=\\$\\'\\\\e\\[${!j}${i}m\\'\n  done\ndone\nbld=$'\\e[1m'\nrst=$'\\e[0m'\n\ncat << EOF\n\n $f0  ▄██▄     $f1  ▀▄   ▄▀     $f2 ▄▄▄████▄▄▄    $f3  ▄██▄     $f4  ▀▄   ▄▀     $f5 ▄▄▄████▄▄▄    $f6  ▄██▄   $rst\n $f0▄█▀██▀█▄   $f1 ▄█▀███▀█▄    $f2███▀▀██▀▀███   $f3▄█▀██▀█▄   $f4 ▄█▀███▀█▄    $f5███▀▀██▀▀███   $f6▄█▀██▀█▄ $rst       \n $f0▀▀█▀▀█▀▀   $f1█▀███████▀█   $f2▀▀▀██▀▀██▀▀▀   $f3▀▀█▀▀█▀▀   $f4█▀███████▀█   $f5▀▀▀██▀▀██▀▀▀   $f6▀▀█▀▀█▀▀ $rst        \n $f0▄▀▄▀▀▄▀▄   $f1▀ ▀▄▄ ▄▄▀ ▀   $f2▄▄▀▀ ▀▀ ▀▀▄▄   $f3▄▀▄▀▀▄▀▄   $f4▀ ▀▄▄ ▄▄▀ ▀   $f5▄▄▀▀ ▀▀ ▀▀▄▄   $f6▄▀▄▀▀▄▀▄ $rst        \n\n$bld $f0  ▄██▄     $f1  ▀▄   ▄▀     $f2 ▄▄▄████▄▄▄    $f3  ▄██▄     $f4  ▀▄   ▄▀     $f5 ▄▄▄████▄▄▄    $f6  ▄██▄  $rst\n$bld $f0▄█▀██▀█▄   $f1 ▄█▀███▀█▄    $f2███▀▀██▀▀███   $f3▄█▀██▀█▄   $f4 ▄█▀███▀█▄    $f5███▀▀██▀▀███   $f6▄█▀██▀█▄$rst\n$bld $f0▀▀█▀▀█▀▀   $f1█▀███████▀█   $f2▀▀▀██▀▀██▀▀▀   $f3▀▀█▀▀█▀▀   $f4█▀███████▀█   $f5▀▀▀██▀▀██▀▀▀   $f6▀▀█▀▀█▀▀$rst\n$bld $f0▄▀▄▀▀▄▀▄   $f1▀ ▀▄▄ ▄▄▀ ▀   $f2▄▄▀▀ ▀▀ ▀▀▄▄   $f3▄▀▄▀▀▄▀▄   $f4▀ ▀▄▄ ▄▄▀ ▀   $f5▄▄▀▀ ▀▀ ▀▀▄▄   $f6▄▀▄▀▀▄▀▄$rst\n\n\n                                               $f7▌$rst\n\n                                             $f7▌$rst\n\n                                        $f7    ▄█▄    $rst\n                                        $f7▄█████████▄$rst\n                                        $f7▀▀▀▀▀▀▀▀▀▀▀$rst\n\nEOF\n"
  },
  {
    "path": "Colours-EyeCandy/tanks.sh",
    "content": "#!/bin/sh\n\n# ANSI Color -- use these variables to easily have different color\n#    and format output. Make sure to output the reset sequence after \n#    colors (f = foreground, b = background), and use the 'off'\n#    feature for anything you turn on.\n\ninitializeANSI()\n{\n  esc=\"\u001b\"\n\n  blackf=\"${esc}[30m\";   redf=\"${esc}[31m\";    greenf=\"${esc}[32m\"\n  yellowf=\"${esc}[33m\"   bluef=\"${esc}[34m\";   purplef=\"${esc}[35m\"\n  cyanf=\"${esc}[36m\";    whitef=\"${esc}[37m\"\n  \n  blackb=\"${esc}[40m\";   redb=\"${esc}[41m\";    greenb=\"${esc}[42m\"\n  yellowb=\"${esc}[43m\"   blueb=\"${esc}[44m\";   purpleb=\"${esc}[45m\"\n  cyanb=\"${esc}[46m\";    whiteb=\"${esc}[47m\"\n\n  boldon=\"${esc}[1m\";    boldoff=\"${esc}[22m\"\n  italicson=\"${esc}[3m\"; italicsoff=\"${esc}[23m\"\n  ulon=\"${esc}[4m\";      uloff=\"${esc}[24m\"\n  invon=\"${esc}[7m\";     invoff=\"${esc}[27m\"\n\n  reset=\"${esc}[0m\"\n}\n\n# note in this first use that switching colors doesn't require a reset\n# first - the new color overrides the old one.\n\ninitializeANSI\n\ncat << EOF\n\n  ${boldon}${redf}    █    ${reset}    ${boldon}${greenf}    █    ${reset}    ${boldon}${yellowf}    █    ${reset}    ${boldon}${bluef}    █    ${reset}    ${boldon}${purplef}    █    ${reset}    ${boldon}${cyanf}    █    ${reset}\n  ${boldon}${redf}▄▄  █  ▄▄${reset}    ${boldon}${greenf}▄▄  █  ▄▄${reset}    ${boldon}${yellowf}▄▄  █  ▄▄${reset}    ${boldon}${bluef}▄▄  █  ▄▄${reset}    ${boldon}${purplef}▄▄  █  ▄▄${reset}    ${boldon}${cyanf}▄▄  █  ▄▄${reset}\n  ${boldon}${redf}███▀▀▀███${reset}    ${boldon}${greenf}███▀▀▀███${reset}    ${boldon}${yellowf}███▀▀▀███${reset}    ${boldon}${bluef}███▀▀▀███${reset}    ${boldon}${purplef}███▀▀▀███${reset}    ${boldon}${cyanf}███▀▀▀███${reset}\n  ${boldon}${redf}███ █ ███${reset}    ${boldon}${greenf}███ █ ███${reset}    ${boldon}${yellowf}███ █ ███${reset}    ${boldon}${bluef}███ █ ███${reset}    ${boldon}${purplef}███ █ ███${reset}    ${boldon}${cyanf}███ █ ███${reset}\n  ${boldon}${redf}██ ▀▀▀ ██${reset}    ${boldon}${greenf}██ ▀▀▀ ██${reset}    ${boldon}${yellowf}██ ▀▀▀ ██${reset}    ${boldon}${bluef}██ ▀▀▀ ██${reset}    ${boldon}${purplef}██ ▀▀▀ ██${reset}    ${boldon}${cyanf}██ ▀▀▀ ██${reset}\n  \n  ${redf}    █    ${reset}    ${greenf}    █    ${reset}    ${yellowf}    █    ${reset}    ${bluef}    █    ${reset}    ${purplef}    █    ${reset}    ${cyanf}    █    ${reset}\n  ${redf}▄▄  █  ▄▄${reset}    ${greenf}▄▄  █  ▄▄${reset}    ${yellowf}▄▄  █  ▄▄${reset}    ${bluef}▄▄  █  ▄▄${reset}    ${purplef}▄▄  █  ▄▄${reset}    ${cyanf}▄▄  █  ▄▄${reset}\n  ${redf}███▀▀▀███${reset}    ${greenf}███▀▀▀███${reset}    ${yellowf}███▀▀▀███${reset}    ${bluef}███▀▀▀███${reset}    ${purplef}███▀▀▀███${reset}    ${cyanf}███▀▀▀███${reset}\n  ${redf}███ █ ███${reset}    ${greenf}███ █ ███${reset}    ${yellowf}███ █ ███${reset}    ${bluef}███ █ ███${reset}    ${purplef}███ █ ███${reset}    ${cyanf}███ █ ███${reset}\n  ${redf}██ ▀▀▀ ██${reset}    ${greenf}██ ▀▀▀ ██${reset}    ${yellowf}██ ▀▀▀ ██${reset}    ${bluef}██ ▀▀▀ ██${reset}    ${purplef}██ ▀▀▀ ██${reset}    ${cyanf}██ ▀▀▀ ██${reset}  \nEOF\n"
  },
  {
    "path": "README.md",
    "content": "A bunch of scripts I keep in `~/Scripts`, which is included in `$PATH`.\n"
  },
  {
    "path": "aur-check",
    "content": "#! /usr/bin/env python3\n\n\"\"\"\nCheck the repo for problems and new package versions\n\"\"\"\n\nimport subprocess\nfrom pathlib import Path\n\nimport tomlkit.toml_file\n\nSOURCE_DIRS = [\n    {\n        \"path\": Path(\"~/Arch/packaging/aur/\").expanduser(),\n        \"nvchecker_source\": \"aur\",\n    },\n]\nNVCHECKER_CONFIG_FILE = Path(\"~/Arch/packaging/aur/nvchecker.toml\").expanduser()\n\n\ndef get_from_SRCINFO(path, key):\n    with open(path, \"r\") as f:\n        for line in f.readlines():\n            line = line.strip()\n            if not line or line.startswith(\"#\"):\n                continue\n            k, v = line.split(\"=\", 1)\n            if k.strip() == key:\n                return v.strip()\n\n\ndef get_from_PKGBUILD(path, key):\n    with open(path, \"r\") as f:\n        for line in f.readlines():\n            if line.startswith(f\"{key}=\"):\n                value = line.split(\"=\", 1)[1].strip()\n                if value.startswith(\"'\") and value.endswith(\"'\"):\n                    value = value[1:-1]\n                if value.startswith('\"') and value.endswith('\"'):\n                    value = value[1:-1]\n                return value\n\n\ndef nvchecker():\n    \"\"\"Updates ``nvchecker`` config file with the sources defined in ``SOURCE_DIRS``\n    and then runs ``nvchecker``.\n    \"\"\"\n    for src in SOURCE_DIRS:\n        root_path = src[\"path\"]\n\n        # read the config file\n        config_file = tomlkit.toml_file.TOMLFile(NVCHECKER_CONFIG_FILE)\n        config = config_file.read()\n\n        # iterate over package directories in the source root\n        for pkg in root_path.iterdir():\n            if not pkg.is_dir():\n                continue\n            elif not (pkg / \"PKGBUILD\").is_file():\n                print(f\"WARNING: PKGBUILD not found in {pkg}\")\n                continue\n\n            # extract from .SRCINFO if it exists\n            if (pkg / \".SRCINFO\").is_file():\n                pkgname = get_from_SRCINFO(pkg / \".SRCINFO\", \"pkgname\")\n                # pkgver = get_from_SRCINFO(pkg / \".SRCINFO\", \"pkgver\")\n            else:\n                # extract pkgname and pkgver from PKGBUILD in the most hackish way\n                pkgname = pkg.name\n                # pkgname = get_from_PKGBUILD(pkg / \"PKGBUILD\", \"pkgname\")\n                # pkgver = get_from_PKGBUILD(pkg / \"PKGBUILD\", \"pkgver\")\n\n            # ensure that a TOML table for the pkgname exists\n            if pkgname not in config:\n                config.add(pkgname, tomlkit.table())\n                update_config = True\n            else:\n                update_config = src.get(\"nvchecker_overwrite\", True)\n\n            # update the config file\n            if update_config:\n                source = src[\"nvchecker_source\"]\n                config[pkgname][\"source\"] = source\n                if source in {\"aur\", \"archpkg\"}:\n                    config[pkgname][source] = pkgname\n                elif source == \"gitlab\":\n                    config[pkgname][\"host\"] = src[\"nvchecker_host\"]\n                    config[pkgname][\"gitlab\"] = src[\"nvchecker_gitlab_format\"].format(\n                        remote_pkgname=pkgname\n                    )\n\n        # write the config file\n        config_file.write(config)\n\n    # run nvchecker\n    subprocess.run([\"nvchecker\", \"-c\", NVCHECKER_CONFIG_FILE], check=True)\n\n\ndef check():\n    nvchecker()\n\n    # TODO: check if rebuild-detector is installed\n    
print(\"Checking packages that need to be rebuilt...\")\n    subprocess.run([\"checkrebuild\", \"-i\", \"lahwaacz\"], check=True)\n\n    # TODO: list packages that are in the database, but package file is deleted or source is missing\n\n\nif __name__ == \"__main__\":\n    check()\n"
  },
  {
    "path": "aur-release",
    "content": "#!/bin/bash\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n# aur-remotebuild - build packages remotely using aur-chroot\n# (based on commitpkg from devtools)\nset -o errexit\nshopt -s nullglob\nreadonly argv0=release\n\nsource /usr/share/devtools/lib/common.sh\nsource /usr/share/devtools/lib/util/srcinfo.sh\n\nsource /usr/share/makepkg/util/parseopts.sh\nsource /usr/share/makepkg/util/util.sh\n\nset -eo pipefail\n\nif [[ ! -f PKGBUILD ]]; then\n    echo \"No PKGBUILD in the current directory!\" >&2\n    exit 1\nfi\n\n# Check if releasing from a branch\nif ! branchname=$(git symbolic-ref --short HEAD); then\n    die 'not on any branch'\nfi\nif [[ \"$branchname\" != master ]]; then\n    die 'must be run from the master branch'\nfi\n\n# default arguments\nserver=pkgbuild.com\nremote_path=public_html/repo\nrsyncopts=(\"${RSYNC_OPTS[@]}\" --perms --chmod='u=rw,go=r')\nrelease_commit=1\nrelease_push=0\nrelease_upload=0\n\nusage() {\n    echo >&2 \"Usage: $argv0 [OPTIONS]\"\n    echo >&2 \"\"\n    echo >&2 \"Run this script in a PKGBUILD dir to release an already built package.\"\n    echo >&2 \"\"\n    echo >&2 \"The script comprises the following operations:\"\n    echo >&2 \"\"\n    echo >&2 \"- (default) modified version-controlled files are staged for commit\"\n    echo >&2 \"- (default) all build artifacts are signed with gpg\"\n    echo >&2 \"- (optional) commits are pushed to the remote git repository\"\n    echo >&2 \"- (optional) build artifacts are uploaded to the binary repository\"\n    echo >&2 \"\"\n    echo >&2 \"OPTIONS\"\n    echo >&2 \"    --no-commit   Do not stage version-controlled files for commit\"\n    echo >&2 \"    --push        Automatically push commits to the remote git repository\"\n    echo >&2 \"    --upload      Automatically upload all build artifacts to the binary\"\n    echo >&2 \"                  repository hosted at $server:$remote_path\"\n    exit 1\n}\n\n## option parsing\nopt_short=''\nopt_long=('no-commit' 'push' 'upload')\nopt_hidden=()\n\nif ! parseopts \"$opt_short\" \"${opt_long[@]}\" \"${opt_hidden[@]}\" -- \"$@\"; then\n    usage\nfi\nset -- \"${OPTRET[@]}\"\n\nwhile true; do\n    case \"$1\" in\n        --no-commit)\n            release_commit=0\n            ;;\n        --push)\n            release_push=1\n            ;;\n        --upload)\n            release_upload=1\n            ;;\n        --)\n            shift;\n            break\n            ;;\n    esac\n    shift\ndone\n\n\ncheck_pkgbuild_validity() {\n    # shellcheck source=/usr/share/pacman/PKGBUILD.proto\n    . ./PKGBUILD\n\n    # skip when there are no sources available\n    if (( ! ${#source[@]} )); then\n        return\n    fi\n\n    # validate sources hash algo is at least > sha1\n    local bad_algos=(\"cksums\" \"md5sums\" \"sha1sums\")\n    local good_hash_algo=false\n\n    # from makepkg libmakepkg/util/schema.sh\n    for integ in \"${known_hash_algos[@]}\"; do\n        local sumname=\"${integ}sums\"\n        if [[ -n ${!sumname} ]] && ! in_array \"${sumname}\" \"${bad_algos[@]}\"; then\n            good_hash_algo=true\n            break\n        fi\n    done\n\n    if ! 
$good_hash_algo; then\n        die \"PKGBUILD lacks a secure cryptographic checksum, insecure algorithms: ${bad_algos[*]}\"\n    fi\n}\n\n# Source makepkg.conf; fail if it is not found\nif [[ -r '/etc/makepkg.conf' ]]; then\n    source '/etc/makepkg.conf'\nelse\n    die '/etc/makepkg.conf not found!'\nfi\n\n# Source user-specific makepkg.conf overrides\nif [[ -r \"${XDG_CONFIG_HOME:-$HOME/.config}/pacman/makepkg.conf\" ]]; then\n    # shellcheck source=/dev/null\n    source \"${XDG_CONFIG_HOME:-$HOME/.config}/pacman/makepkg.conf\"\nelif [[ -r \"$HOME/.makepkg.conf\" ]]; then\n    # shellcheck source=/dev/null\n    source \"$HOME/.makepkg.conf\"\nfi\n\nsource=()\n# shellcheck source=/usr/share/pacman/PKGBUILD.proto\n. ./PKGBUILD\npkgbase=${pkgbase:-$pkgname}\n\nif (( ${#validpgpkeys[@]} != 0 )); then\n    if [[ -d keys ]]; then\n        for key in \"${validpgpkeys[@]}\"; do\n            if [[ ! -f keys/pgp/$key.asc ]]; then\n                export-pkgbuild-keys || die 'Failed to export valid PGP keys for source files'\n            fi\n        done\n    else\n        export-pkgbuild-keys || die 'Failed to export valid PGP keys for source files'\n    fi\n\n    git add --force -- keys/pgp/*\nfi\n\n# find files which should be under source control\nneedsversioning=(PKGBUILD)\nfor s in \"${source[@]}\"; do\n    [[ $s != *://* ]] && needsversioning+=(\"$s\")\ndone\nfor i in 'changelog' 'install'; do\n    while read -r file; do\n        # evaluate any bash variables used\n        # shellcheck disable=SC2001\n        eval \"file=\\\"$(sed \"s/^\\(['\\\"]\\)\\(.*\\)\\1\\$/\\2/\" <<< \"$file\")\\\"\"\n        needsversioning+=(\"$file\")\n    done < <(sed -n \"s/^[[:space:]]*$i=//p\" PKGBUILD)\ndone\nfor key in \"${validpgpkeys[@]}\"; do\n    needsversioning+=(\"keys/pgp/$key.asc\")\ndone\n\n# assert that they really are controlled by git\nif (( ${#needsversioning[*]} )); then\n    for file in \"${needsversioning[@]}\"; do\n        # skip none existing files\n        if [[ ! -f \"${file}\" ]]; then\n            continue\n        fi\n        if ! 
git ls-files --error-unmatch \"$file\"; then\n            die \"%s is not under version control\" \"$file\"\n        fi\n    done\nfi\n\n\n# check packages for validity\nfor _arch in \"${arch[@]}\"; do\n    for _pkgname in \"${pkgname[@]}\"; do\n        fullver=$(get_full_version \"$_pkgname\")\n\n        if pkgfile=$(find_cached_package \"$_pkgname\" \"$fullver\" \"$_arch\"); then\n            check_package_validity \"$pkgfile\"\n        fi\n    done\n\n    fullver=$(get_full_version \"$pkgbase\")\n    if pkgfile=$(find_cached_package \"$pkgbase-debug\" \"$fullver\" \"$_arch\"); then\n        check_package_validity \"$pkgfile\"\n    fi\ndone\n\n# NOTE: not a reality on the AUR...\n# check for PKGBUILD standards\n#check_pkgbuild_validity\n\n# auto generate .SRCINFO\n# shellcheck disable=SC2119\nwrite_srcinfo_file\n\n\nif (( release_commit )); then\n    git add --force .SRCINFO\n\n    if [[ -n $(git status --porcelain --untracked-files=no) ]]; then\n        stat_busy 'Staging files'\n        for f in $(git ls-files --modified); do\n            git add \"$f\"\n        done\n        for f in $(git ls-files --deleted); do\n            git rm \"$f\"\n        done\n        stat_done\n\n        msgtemplate=\"upgpkg: $(get_full_version)\"\n        if [[ -n $1 ]]; then\n            stat_busy 'Committing changes'\n            git commit -q -m \"${msgtemplate}: ${1}\" || die\n            stat_done\n        else\n            [[ -z ${WORKDIR:-} ]] && setup_workdir\n            msgfile=$(mktemp --tmpdir=\"${WORKDIR}\" commitpkg.XXXXXXXXXX)\n            echo \"$msgtemplate\" > \"$msgfile\"\n            if [[ -n $GIT_EDITOR ]]; then\n                $GIT_EDITOR \"$msgfile\" || die\n            elif giteditor=$(git config --get core.editor); then\n                $giteditor \"$msgfile\" || die\n            elif [[ -n $VISUAL ]]; then\n                $VISUAL \"$msgfile\" || die\n            elif [[ -n $EDITOR ]]; then\n                $EDITOR \"$msgfile\" || die\n            else\n                die \"No usable editor found (tried \\$GIT_EDITOR, git config [core.editor], \\$VISUAL, \\$EDITOR).\"\n            fi\n            [[ -s $msgfile ]] || die\n            stat_busy 'Committing changes'\n            git commit -v -q -F \"$msgfile\" || die\n            unlink \"$msgfile\"\n            stat_done\n        fi\n    fi\n\n    if (( release_push )); then\n        git_remote_branch=$(git rev-parse --abbrev-ref --symbolic-full-name \"@{u}\")\n        git_remote=${git_remote_branch%/*}\n        git_remote_url=$(git remote get-url \"$git_remote\")\n\n        msg 'Fetching remote changes'\n        git fetch --prune --prune-tags origin || die 'failed to fetch remote changes'\n\n        # Check if local branch is up to date and contains the latest origin commit\n        if remoteref=$(git rev-parse \"$git_remote_branch\" 2>/dev/null); then\n            if [[ $(git branch \"$branchname\" --contains \"$remoteref\" --format '%(refname:short)') != \"$branchname\" ]]; then\n                die \"local branch is out of date, run 'git pull --rebase'\"\n            fi\n        fi\n\n        msg \"Pushing commits to $git_remote_branch where $git_remote is $git_remote_url\"\n        git push --tags --set-upstream \"$git_remote\" \"$branchname\" || abort\n    else\n        warning \"Not pushing commits because --push was not given.\"\n    fi\nelif (( release_push )); then\n    warning \"Not pushing commits because --no-commit was given.\"\nfi\n\n\ndeclare -a uploads\n\nfor _arch in \"${arch[@]}\"; do\n    for _pkgname in 
\"${pkgname[@]}\"; do\n        fullver=$(get_full_version \"$_pkgname\")\n        if ! pkgfile=$(find_cached_package \"$_pkgname\" \"$fullver\" \"${_arch}\"); then\n            warning \"Skipping %s: failed to locate package file\" \"$_pkgname-$fullver-$_arch\"\n            continue 2\n        fi\n        uploads+=(\"$pkgfile\")\n    done\n\n    fullver=$(get_full_version \"$pkgbase\")\n    if ! pkgfile=$(find_cached_package \"$pkgbase-debug\" \"$fullver\" \"$_arch\"); then\n        continue\n    fi\n    if ! is_debug_package \"$pkgfile\"; then\n        continue\n    fi\n    uploads+=(\"$pkgfile\")\ndone\n\nfor pkgfile in \"${uploads[@]}\"; do\n    sigfile=\"${pkgfile}.sig\"\n    if [[ ! -f $sigfile ]]; then\n        msg \"Signing package %s...\" \"${pkgfile}\"\n        if [[ -n $GPGKEY ]]; then\n            SIGNWITHKEY=(-u \"${GPGKEY}\")\n        fi\n        gpg --detach-sign --use-agent --no-armor \"${SIGNWITHKEY[@]}\" \"${pkgfile}\" || die\n    fi\n    if ! gpg --verify \"$sigfile\" \"$pkgfile\" >/dev/null 2>&1; then\n        die \"Signature %s is incorrect!\" \"$sigfile\"\n    fi\n    uploads+=(\"$sigfile\")\ndone\n\n\nif (( release_upload )) && [[ ${#uploads[*]} -gt 0 ]]; then\n    new_uploads=()\n\n    # convert to absolute paths so rsync can work with colons (epoch)\n    while read -r -d '' upload; do\n        new_uploads+=(\"$upload\")\n    done < <(realpath -z \"${uploads[@]}\")\n\n    uploads=(\"${new_uploads[@]}\")\n    unset new_uploads\n\n    msg 'Uploading all package and signature files'\n    rsync \"${rsyncopts[@]}\" \"${uploads[@]}\" \"$server:$remote_path/\" || die\n\n    # convert to remote paths\n    declare -a remote_pkgfiles\n    for pkgfile in \"${uploads[@]}\"; do\n        if ! [[ \"$pkgfile\" = *.sig ]]; then\n            remote_pkgfiles+=(\"$remote_path\"/\"$(basename \"$pkgfile\")\")\n        fi\n    done\n\n    msg 'Updating remote pacman database'\n    ssh -t \"${SSH_OPTS[@]}\" -- \"$server\" \"./repo add ${remote_pkgfiles[*]@Q} && ./repo update\"\nfi\n"
  },
  {
    "path": "aur-remotebuild",
    "content": "#!/bin/bash\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n# aur-remotebuild - build packages remotely using aur-chroot\n# (partly based on offload-build from devtools)\nset -o errexit\nshopt -s nullglob\nreadonly argv0=remotebuild\n\nif [[ ! -f PKGBUILD ]]; then\n    echo \"No PKGBUILD in the current directory!\" >&2\n    exit 1\nfi\n\n# default arguments\nrepo_name=lahwaacz\nchroot_args=(\n    --create\n    --update\n    --build\n    # makechrootpkg options\n    --checkpkg\n    --namcap\n)\n\nusage() {\n    printf >&2 'Usage: %s HOSTNAME [--repo NAME] [--inspect never|always|failure] [--] <aur-chroot args>\\n' \"$argv0\"\n    exit 1\n}\n\nserver=\"$1\"\nshift\nif [[ \"$server\" == \"\" ]]; then\n    usage\nfi\n\nsource /usr/share/makepkg/util/parseopts.sh\n\n## option parsing\nopt_short='d:'\nopt_long=('inspect:')\nopt_hidden=()\n\nif ! parseopts \"$opt_short\" \"${opt_long[@]}\" \"${opt_hidden[@]}\" -- \"$@\"; then\n    usage\nfi\nset -- \"${OPTRET[@]}\"\n\nwhile true; do\n    case \"$1\" in\n        --repo)\n            shift;\n            repo_name=$1\n            ;;\n        --inspect)\n            shift;\n            chroot_args+=(--makechrootpkg-args=\"-x,$1\")\n            ;;\n        --)\n            shift;\n            break\n            ;;\n    esac\n    shift\ndone\n\n# pass db name to aur-chroot\nchroot_args+=(\n    --pacman-conf \"/etc/aurutils/pacman-$repo_name.conf\"\n    --makepkg-conf \"/etc/aurutils/makepkg-$repo_name.conf\"\n)\n\n# pass remaining arguments to aur-chroot\nif (($#)); then\n    chroot_args+=(\"$@\")\nfi\n\n\n# aur chroot command to run remotely\naur_chroot_cmd=(\n    env\n    # aur-chroot does not preserve SRCDEST and LOGDEST by default https://github.com/aurutils/aurutils/issues/1196\n    AUR_PACMAN_AUTH=\"sudo --preserve-env=GNUPGHOME,SSH_AUTH_SOCK,SRCDEST,PKGDEST,LOGDEST\"\n    # NOTE: do not clear SRCDEST to use cached directory set in the user's makepkg.conf on the remote host\n    #SRCDEST=\"\"\n    PKGDEST=\"\"\n    LOGDEST=\"\"\n    aur\n    chroot\n    \"${chroot_args[@]}\"\n)\n\n\n### offload-build-like part\nsource /usr/share/devtools/lib/common.sh\nsource /usr/share/devtools/lib/util/makepkg.sh\nsource /usr/share/devtools/lib/util/srcinfo.sh\nsource /usr/share/makepkg/util/config.sh\n\n[[ -z ${WORKDIR:-} ]] && setup_workdir\nTEMPDIR=$(mktemp --tmpdir=\"${WORKDIR}\" --directory aur-remotebuild.XXXXXXXXXX)\nexport TEMPDIR\n\n# Load makepkg.conf variables to be available\n# shellcheck disable=SC2119\nload_makepkg_config\n\n# Use a source-only tarball as an intermediate to transfer files. This\n# guarantees the checksums are okay, and guarantees that all needed files are\n# transferred, including local sources, install scripts, and changelogs.\nexport SRCPKGDEST=\"${TEMPDIR}\"\nmakepkg_source_package || die \"unable to make source package\"\n\n# Temporary cosmetic workaround makepkg if SRCDEST is set somewhere else\n# but an empty src dir is created in PWD. 
Remove once fixed in makepkg.\nrmdir --ignore-fail-on-non-empty src 2>/dev/null || true\n\n# Create a temporary directory on the server\nremote_temp=$(\n    ssh \"${SSH_OPTS[@]}\" -- \"$server\" '\n        temp=\"${XDG_CACHE_HOME:-$HOME/.cache}/aur-remotebuild\" &&\n        mkdir -p \"$temp\" &&\n        mktemp --directory --tmpdir=\"$temp\"\n')\n\n# Transfer the srcpkg to the server\nmsg \"Transferring source package to the server...\"\n_srcpkg=(\"$SRCPKGDEST\"/*\"$SRCEXT\")\nsrcpkg=\"${_srcpkg[0]}\"\nrsync \"${RSYNC_OPTS[@]}\" -- \"$srcpkg\" \"$server\":\"$remote_temp\" || die\n\n# Prepare the srcpkg on the server\nmsg \"Extracting srcpkg\"\nssh \"${SSH_OPTS[@]}\" -- \"$server\" \"cd ${remote_temp@Q} && bsdtar --strip-components 1 -xvf $(basename \"$srcpkg\")\" || die\n\n# Run the build command on the server\nmsg \"Running aur chroot ${chroot_args[*]}\"\n# shellcheck disable=SC2145\nif ssh \"${SSH_OPTS[@]}\" -t -- \"$server\" \"cd ${remote_temp@Q} && ${aur_chroot_cmd[@]@Q}\"; then\n    msg \"Build complete\"\n\n    # Get an array of files that should be downloaded from the server\n    mapfile -t files < <(\n        ssh \"${SSH_OPTS[@]}\" -- \"$server\" \"\n            cd ${remote_temp@Q}\"' &&\n            while read -r file; do\n                [[ -f \"${file}\" ]] && printf \"%s\\n\" \"${file}\" ||:\n            done < <(SRCDEST=\"\" PKGDEST=\"\" LOGDEST=\"\" makepkg --packagelist) &&\n            printf \"%s\\n\" '\"${remote_temp@Q}/PKGBUILD\"'\n\n            find '\"${remote_temp@Q}\"' -name \"*.log\"\n    ')\nelse\n    # Build failed, only the logs should be downloaded from the server\n    mapfile -t files < <(\n        ssh \"${SSH_OPTS[@]}\" -- \"$server\" '\n            find '\"${remote_temp@Q}\"' -name \"*.log\"\n    ')\nfi\n\n\nif (( ${#files[@]} )); then\n    msg 'Downloading files...'\n    rsync \"${RSYNC_OPTS[@]}\" -- \"${files[@]/#/$server:}\" \"${TEMPDIR}/\" || die\n\n    if is_globfile \"${TEMPDIR}\"/*.log; then\n        # shellcheck disable=SC2031\n        mv \"${TEMPDIR}\"/*.log \"${LOGDEST:-${PWD}}/\"\n    fi\n    if is_globfile \"${TEMPDIR}\"/*.pkg.tar*; then\n        # Building a package may change the PKGBUILD during update_pkgver\n        # shellcheck disable=SC2031\n        mv \"${TEMPDIR}/PKGBUILD\" \"${PWD}/\"\n        # shellcheck disable=SC2031\n        mv \"${TEMPDIR}\"/*.pkg.tar* \"${PKGDEST:-${PWD}}/\"\n    else\n        # shellcheck disable=SC2031\n        error \"Build failed, check logs in ${LOGDEST:-${PWD}}\"\n        exit 1\n    fi\n\n    # auto generate .SRCINFO\n    # shellcheck disable=SC2119\n    write_srcinfo_file\n\n    msg \"Removing remote temporary directory $remote_temp\"\n    ssh \"${SSH_OPTS[@]}\" -- \"$server\" \"rm -rf -- ${remote_temp@Q}\"\nelse\n    exit 1\nfi\n"
  },
  {
    "path": "backup-system.sh",
    "content": "#!/bin/bash\n\n# exit on first error\nset -e\n\nbackupdir=\"/media/WD-black/backups\"\n\n# check if destination dir exists\nif [[ ! -d \"$backupdir\" ]]; then\n    echo \"Backup directory $backupdir does not exist. Is the drive mounted?\"\n    exit 1\nfi\n\n#homedir=\"$backupdir/home_rsync_copy\"\n#rootdir=\"$backupdir/root_rsync_copy\"\n\n#echo \"Syncing / to $rootdir (root permissions required)\"\n#sudo rsync / \"$rootdir\" -aPhAHX --info=progress2,name0,stats2 --delete --exclude={\"/dev/*\",\"/proc/*\",\"/sys/*\",\"/tmp/*\",\"/run/*\",\"/mnt/*\",\"/media/*\",\"/lost+found\",\"/home\",\"/swapfile\",\"/.snapshots\"}\n\n#echo \"Syncing ~/ to $homedir\"\n#rsync ~/ $homedir -aPhAHX --one-file-system --info=progress2,name0,stats2 --delete\n\n\n# TODO:\n# - make snapshot with snapper just before btrfs-sync\n# - run `sync` before btrfs-sync to make sure that the snapshot is fully written to the disk\n# - copy the snapper metadata files (info.xml)\n# - make snapshots of the remaining subvolumes: @postgres @nspawn_containers @var_log\n\necho \"Syncing /.snapshots to $backupdir/root (root permissions required)\"\nsudo btrfs-sync --verbose --delete /.snapshots \"$backupdir/root\"\n\necho \"Syncing /home/.snapshots to $backupdir/home (root permissions required)\"\nsudo btrfs-sync --verbose --delete /home/.snapshots \"$backupdir/home\"\n"
  },
  {
    "path": "batmanpager",
    "content": "#!/bin/sh\n\n# mandoc passes a file name, other tools write to stdout\n# using `cat \"$@\"` we take care of both reading from file and stdin\n# https://github.com/sharkdp/bat/issues/1145#issuecomment-1743518097\nexec cat \"$@\" | col -bx | bat --language man --style plain --pager \"$PAGER\"\n"
  },
  {
    "path": "bsnap.sh",
    "content": "#! /usr/bin/bash\n\n# exit on first error\nset -e\n\nbackupdir=\"$HOME/_backup_snapshots\"\n\nusage() {\n    echo $@ >&2\n    echo \"Usage: $0 {snapshot|transfer} ...\n\n    snapshot        Create snapshots for every subvolume configured in '\\$backupdir/*'.\n                    The subvolume is specified by a symlink '\\$backupdir/*/cur'\n                    pointing to a Btrfs subvolume.\n\n    transfer <dst>  Transfer all snapshots from '\\$backupdir/*/' to '<dst>/', which\n                    should be other Btrfs partition. The tree structure is kept\n                    intact.\n\n    \\$backupdir is set to '$backupdir'\n\" >&2\n}\n\ntransfer() {\n    src=\"$1\"    # e.g. ~/_backup_snapshots/Bbox/\n    dst=\"$2\"    # e.g. /media/WD1T/backup-lahwaacz/Bbox/\n\n    [[ ! -d \"$dst\" ]] && mkdir \"$dst\"\n\n    # get list of snapshots to transfer\n    src_snapshots=($(find \"$src\" -mindepth 1 -maxdepth 1 -type d | sort))\n\n    _len=${#src_snapshots[@]}\n    for ((i=0; i<$_len; i++)); do\n        if [[ -e \"$dst/$(basename ${src_snapshots[$i]})\" ]]; then\n            # nothing to transfer\n            echo \"Snapshot '$dst/$(basename ${src_snapshots[$i]})' already exists\"\n            continue\n        fi\n\n        # There is currently an issue that the snapshots to be used with \"btrfs send\"\n        # must be physically on the disk, or you may receive a \"stale NFS file handle\"\n        # error. This is accomplished by \"sync\" after the snapshot\n        #\n        # ref: http://marc.merlins.org/perso/btrfs/post_2014-03-22_Btrfs-Tips_-Doing-Fast-Incremental-Backups-With-Btrfs-Send-and-Receive.html\n        sync\n\n        dst_snapshots=($(find \"$dst\" -mindepth 1 -maxdepth 1 -type d | sort))\n\n        if [[ $i -eq 0 ]]; then\n            # no parent, make initial transfer\n            sudo sh -c \"btrfs send ${src_snapshots[$i]} | btrfs receive $dst\"\n        else\n            sudo sh -c \"btrfs send -p ${src_snapshots[(($i-1))]} ${src_snapshots[$i]} | btrfs receive $dst\"\n        fi\n\n    done\n\n\n    \n}\n\ncase $1 in\n    snapshot)\n        for dir in \"$backupdir\"/*; do\n            if [[ -L \"$dir/cur\" ]]; then\n                btrfs subvolume snapshot -r $(realpath \"$dir/cur\") \"$dir/$(date +%F-%T)\"\n            else\n                echo \"$dir/cur does not exist or is not a symlink\"\n            fi\n        done\n        ;;\n    transfer)\n        [ -n \"$2\" -a -d \"$2\" ] || usage \"Invalid destination path\"\n\n        for dir in \"$backupdir\"/*; do\n            transfer \"$dir\" \"$2\"/$(basename \"$dir\")\n        done\n        ;;\n    *)\n        usage \"Incorrect invocation\"\nesac\n"
  },
  {
    "path": "btrfs-diff",
    "content": "#!/bin/bash\n\n# Author: http://serverfault.com/users/96883/artfulrobot\n# License: Unknown\n#\n# This script will show most files that got modified or added.\n# Renames and deletions will not be shown.\n# Read limitations on:\n# http://serverfault.com/questions/399894/does-btrfs-have-an-efficient-way-to-compare-snapshots\n# \n# btrfs send is the best way to do this long term, but as of kernel\n# 3.14, btrfs send cannot just send a list of changed files without\n# scanning and sending all the changed data blocks along.\n\nusage() { echo $@ >&2; echo \"Usage: $0 <older-snapshot> <newer-snapshot>\" >&2; exit 1; }\n\n[ $# -eq 2 ] || usage \"Incorrect invocation\";\nSNAPSHOT_OLD=$1;\nSNAPSHOT_NEW=$2;\n\n[ -d $SNAPSHOT_OLD ] || usage \"$SNAPSHOT_OLD does not exist\";\n[ -d $SNAPSHOT_NEW ] || usage \"$SNAPSHOT_NEW does not exist\";\n\nOLD_TRANSID=`btrfs subvolume find-new \"$SNAPSHOT_OLD\" 9999999`\nOLD_TRANSID=${OLD_TRANSID#transid marker was }\n[ -n \"$OLD_TRANSID\" -a \"$OLD_TRANSID\" -gt 0 ] || usage \"Failed to find generation for $SNAPSHOT_NEW\"\n\nbtrfs subvolume find-new \"$SNAPSHOT_NEW\" $OLD_TRANSID | sed '$d' | cut -f17- -d' ' | sort | uniq\n"
  },
  {
    "path": "btrfs-sync",
    "content": "#!/bin/bash\n\n#\n# Simple script that synchronizes BTRFS snapshots locally.\n# Features compression, retention policy and automatic incremental sync\n#\n\nset -e\nset -o pipefail\nset -o errtrace\n\nprint_usage() {\n  echo \"Usage:\n  $BIN [options] <src> [<src>...] <dir>\n\n  -k|--keep NUM     keep only last <NUM> sync'ed snapshots\n  -d|--delete       delete snapshots in <dst> that don't exist in <src>\n  -q|--quiet        don't display progress\n  -v|--verbose      display more information\n  -h|--help         show usage\n\n<src> can either be a single snapshot, or a folder containing snapshots\n\"\n}\n\nechov() { if [[ \"$VERBOSE\" == 1 ]]; then echo \"$@\"; fi }\n\n#----------------------------------------------------------------------------------------------------------\n\n# preliminary checks\nBIN=\"${0##*/}\"\n[[ $# -lt 2      ]] && { print_usage                                ; exit 1; }\n[[ ${EUID} -ne 0 ]] && { echo \"Must be run as root. Try 'sudo $BIN'\"; exit 1; }\n\n# parse arguments\nKEEP=0\n\nOPTS=$( getopt -o hqzZk:p:dv -l quiet -l help -l keep: -l delete -l verbose -- \"$@\" 2>/dev/null )\n[[ $? -ne 0 ]] && { echo \"error parsing arguments\"; exit 1; }\neval set -- \"$OPTS\"\n\nwhile true; do\n  case \"$1\" in\n    -h|--help   ) print_usage; exit  0 ;;\n    -q|--quiet  ) QUIET=1    ; shift 1 ;;\n    -d|--delete ) DELETE=1   ; shift 1 ;;\n    -k|--keep   ) KEEP=$2    ; shift 2 ;;\n    -v|--verbose) VERBOSE=1  ; shift 1 ;;\n    --)                shift;  break   ;;\n  esac\ndone\n\n# detect src and dst arguments\nSRC=( \"${@:1:$#-1}\" )\nDST=\"${@: -1}\"\n\ntest -x \"$SRC\" &>/dev/null || {\n  echo \"Access error. Do you have adequate permissions for $SRC?\"\n  exit 1\n}\n\ntest -x \"$DST\" &>/dev/null || {\n  echo \"Access error. Do you have adequate permissions for $DST?\"\n  exit 1\n}\n\n#----------------------------------------------------------------------------------------------------------\n\n# more checks\n\n## don't overlap\nif pgrep -F /run/btrfs-sync.pid &>/dev/null; then\n  echo \"$BIN is already running\"\n  exit 1\nfi\necho $$ > /run/btrfs-sync.pid\n\n## src checks\nechov \"* Check source\"\nSRCS=()\nSRCS_BASE=()\nfor s in \"${SRC[@]}\"; do\n  src=\"$(realpath \"$s\")\"\n  if ! test -e \"$src\"; then\n    echo \"$s not found\"\n    exit 1\n  fi\n  # check if the src is a read-only subvolume\n  if btrfs subvolume show \"$src\" &>/dev/null && [[ \"$(btrfs property get -ts \"$src\")\" == \"ro=true\" ]]; then\n    SRCS+=(\"$src\")\n    SRCS_BASE+=(\"$src\")\n  else\n    for dir in $( find \"$src\" -maxdepth 2 -type d ); do\n      # check if the src is a read-only subvolume\n      if btrfs subvolume show \"$dir\" &>/dev/null && [[ \"$(btrfs property get -ts \"$dir\")\" == \"ro=true\" ]]; then\n        SRCS+=(\"$dir\")\n        SRCS_BASE+=(\"$src\")\n      fi\n    done\n  fi\ndone\nif [[ ${#SRCS[@]} -eq 0 ]]; then\n  echo \"no BTRFS subvolumes found\"\n  exit 1\nfi\n\n## use 'pv' command if available\nPV=( pv -F\"time elapsed [%t] | rate %r | total size [%b]\" )\nif [[ \"$QUIET\" == \"1\" ]]; then\n  PV=( cat )\nelse\n  if ! 
type pv &>/dev/null; then\n    echo \"INFO: install the 'pv' package in order to get a progress indicator\"\n    PV=( cat )\n  fi\nfi\n\n#----------------------------------------------------------------------------------------------------------\n\n# sync snapshots\n\nget_dst_snapshots() {      # sets DSTS DST_UUIDS\n  local DST=\"$1\"\n  DSTS=()\n  DST_UUIDS=()\n  for dir in $( find \"$DST\" -maxdepth 2 -type d ); do\n    if btrfs subvolume show \"$dir\" &>/dev/null; then\n      local UUID=$( btrfs subvolume show \"$dir\" 2>/dev/null | grep 'Received UUID' | awk '{ print $3 }' )\n      if [[ \"$UUID\" != \"-\" ]] && [[ \"$UUID\" != \"\" ]]; then\n        DSTS+=(\"$dir\")\n        DST_UUIDS+=(\"$UUID\")\n      fi\n    fi\n  done\n}\n\nchoose_seed() {      # sets SEED\n  local SRC=\"$1\"\n  local SRC_BASE=\"$2\"\n\n  SEED=\"$SEED_NEXT\"\n  if [[ \"$SEED\" == \"\" ]]; then\n    # try to get most recent src snapshot that exists in dst to use as a seed\n    local RXID_CALCULATED=0\n    declare -A PATH_RXID DATE_RXID SHOWP RXIDP DATEP\n    local LIST=\"$( btrfs subvolume list -su \"$SRC\" )\"\n    local SEED_CANDIDATES=()\n    for id in \"${DST_UUIDS[@]}\"; do\n      # try to match by UUID\n      local PATH_=$( awk \"{ if ( \\$14 == \\\"$id\\\" ) print \\$16       }\" <<<\"$LIST\" )\n      local DATE=$(  awk \"{ if ( \\$14 == \\\"$id\\\" ) print \\$11, \\$12 }\" <<<\"$LIST\" )\n\n      # try to match by received UUID, only if necessary\n      if [[ \"$PATH_\" == \"\" ]]; then\n        if [[ \"$RXID_CALCULATED\" == \"0\" ]]; then # create table during the first iteration if needed\n          local PATHS=( $( btrfs subvolume list -u \"$SRC\" | awk '{ print $11 }' ) )\n          for p in \"${PATHS[@]}\"; do\n            SHOWP=\"$( btrfs subvolume show \"$( dirname \"$SRC\" )/$( basename \"$p\" )\" 2>/dev/null )\"\n            RXIDP=\"$( grep 'Received UUID' <<<\"$SHOWP\" | awk '{ print $3     }' )\"\n            DATEP=\"$( grep 'Creation time' <<<\"$SHOWP\" | awk '{ print $3, $4 }' )\"\n            [[ \"$RXIDP\" == \"\" ]] && continue\n            PATH_RXID[\"$RXIDP\"]=\"$p\"\n            DATE_RXID[\"$RXIDP\"]=\"$DATEP\"\n          done\n          RXID_CALCULATED=1\n        fi\n        PATH_=\"${PATH_RXID[\"$id\"]}\"\n        DATE=\"${DATE_RXID[\"$id\"]}\"\n      fi\n\n      if [[ \"$PATH_\" == \"\" ]] || [[ \"$PATH_\" == \"$( basename \"$SRC\" )\" ]]; then\n        continue\n      fi\n\n      # if the path does not exist, it is likely relative to the root subvolume\n      # rather than the mounted subvolume\n      if ! 
test -d \"$PATH_\" && mountpoint -q \"$SRC_BASE\"; then\n        local SRC_BASE_SUBVOL=$(findmnt -n -o OPTIONS \"$SRC_BASE\" | tr \",\" \"\\n\" | grep \"subvol=\" | awk -F '=' '{ print $2 }')\n        # drop the leading slash\n        SRC_BASE_SUBVOL=\"${SRC_BASE_SUBVOL#/}\"\n        # replace the prefix in $PATH_\n        if [[ \"$PATH_\" =~ \"$SRC_BASE_SUBVOL\"* ]]; then\n          PATH_=\"${PATH_#${SRC_BASE_SUBVOL}}\"\n          PATH_=\"$SRC_BASE/$PATH_\"\n        fi\n      fi\n\n      local SECS=$( date -d \"$DATE\" +\"%s\" )\n      SEED_CANDIDATES+=(\"$SECS|$PATH_\")\n    done\n    SEED=$(IFS=$'\\n' echo \"${SEED_CANDIDATES[@]}\" | sort -V | tail -1 | cut -f2 -d'|')\n  fi\n}\n\nexists_at_dst() {\n  local SHOW=\"$( btrfs subvolume show \"$SRC\" )\"\n\n  local SRC_UUID=\"$( grep 'UUID:' <<< \"$SHOW\" | head -1 | awk '{ print $2 }' )\"\n  grep -q \"$SRC_UUID\" <<<\"${DST_UUIDS[@]}\" && return 0;\n\n  local SRC_RXID=\"$( grep 'Received UUID' <<< \"$SHOW\"   | awk '{ print $3 }' )\"\n  grep -q \"^-$\"       <<<\"$SRC_RXID\"       && return 1;\n  grep -q \"$SRC_RXID\" <<<\"${DST_UUIDS[@]}\" && return 0;\n\n  return 1\n}\n\n## sync incrementally\nsync_snapshot() {\n  local SRC=\"$1\"\n  local SRC_BASE=\"$2\"\n  if ! test -d \"$SRC\" || ! test -d \"$SRC_BASE\"; then\n    return\n  fi\n\n  if exists_at_dst \"$SRC\"; then\n    echov \"* Skip existing '$SRC'\"\n    return 0\n  fi\n\n  choose_seed \"$SRC\" \"$SRC_BASE\"  # sets SEED\n  echo \"SEED=$SEED\"\n\n  # incremental sync argument\n  if [[ \"$SEED\" != \"\" ]]; then\n    if test -d \"$SEED\"; then\n      # Sends the difference between the new snapshot and old snapshot to the\n      # backup location. Using the -c flag instead of -p tells it that there\n      # is an identical subvolume to the old snapshot at the receiving\n      # location where it can get its data. This helps speed up the transfer.\n      local SEED_ARG=( -c \"$SEED\" )\n    else\n      echo \"INFO: couldn't find $SEED. 
Non-incremental mode\"\n    fi\n  fi\n\n  # destination path where the subvolume will be sent\n  local DST_SUBVOL=\"$DST/$( realpath --relative-to \"$SRC_BASE\" \"$SRC\" )\"\n  if test -d \"$DST_SUBVOL\"; then\n    echo \"ERROR: destination directory $DST_SUBVOL already exists, but was not detected as a Btrfs subvolume.\" >&2\n    return 1\n  fi\n\n  # create the parent directory at destination\n  mkdir -p \"$(dirname \"$DST_SUBVOL\")\"\n\n  # print info\n  echo -n \"* Synchronizing '$SRC' to '$DST_SUBVOL'\"\n  if [[ \"$SEED\" != \"\" ]]; then\n    echov -n \" using seed '$SEED'\"\n  fi\n  echo \"...\"\n\n  # do it\n  btrfs send -q \"${SEED_ARG[@]}\" \"$SRC\" \\\n    | \"${PV[@]}\" \\\n    | btrfs receive \"$(dirname \"$DST_SUBVOL\")\" 2>&1 \\\n    | (grep -v -e'^At subvol ' -e'^At snapshot ' || true) \\\n    || {\n      btrfs subvolume delete \"$DST_SUBVOL\" 2>/dev/null\n      return 1;\n    }\n\n  # update DST list\n  DSTS+=(\"$DST_SUBVOL\")\n  DST_UUIDS+=(\"$SRC_UUID\")\n  SEED_NEXT=\"$SRC\"\n}\n\n#----------------------------------------------------------------------------------------------------------\n\n# sync all snapshots found in src\nechov \"* Check destination\"\nget_dst_snapshots \"$DST\" # sets DSTS DST_UUIDS\nfor (( i=0; i<\"${#SRCS[@]}\"; i++ )); do\n  src=\"${SRCS[$i]}\"\n  src_base=\"${SRCS_BASE[$i]}\"\n  sync_snapshot \"$src\" \"$src_base\" && RET=0 || RET=1\n#  for i in 1 2; do\n#    [[ \"$RET\" != \"1\" ]] && break\n#    echo \"* Retrying '$src'...\"\n#    sync_snapshot \"$src\" && RET=0 || RET=1\n#  done\n  if [[ \"$RET\" == \"1\" ]]; then\n    echo \"Abort\"\n    exit 1\n  fi\ndone\n\n#----------------------------------------------------------------------------------------------------------\n\n# retention policy\nif [[ \"$KEEP\" != 0 ]] && [[ ${#DSTS[@]} -gt $KEEP ]]; then\n  echo \"* Pruning old snapshots...\"\n  for (( i=0; i < $(( ${#DSTS[@]} - KEEP )); i++ )); do\n    PRUNE_LIST+=( \"${DSTS[$i]}\" )\n  done\n  btrfs subvolume delete \"${PRUNE_LIST[@]}\"\nfi\n\n# delete flag\nif [[ \"$DELETE\" == 1 ]]; then\n  for dst in \"${DSTS[@]}\"; do\n    FOUND=0\n#    for src in \"${SRCS[@]}\"; do\nfor (( i=0; i<\"${#SRCS[@]}\"; i++ )); do\n  src=\"${SRCS[$i]}\"\n  echo \"checking $src\"\n      if [[ \"$( basename $src )\" == \"$( basename $dst )\" ]]; then\n        FOUND=1\n        break\n      fi\n    done\n    if [[ \"$FOUND\" == 0 ]]; then\n      DEL_LIST+=( \"$dst\" )\n    fi\n  done\n  if [[ \"$DEL_LIST\" != \"\" ]]; then\n    echo \"* Deleting non existent snapshots...\"\n    btrfs subvolume delete \"${DEL_LIST[@]}\"\n  fi\nfi\n"
  },
  {
    "path": "btrfs-sync-WIP",
    "content": "#!/bin/bash\n\nset -o errtrace\n\nversion=\"0.0\"\nname=\"btrfs-sync\"\n\nSNAPPER_CONFIG=/etc/conf.d/snapper\n\nTMPDIR=$(mktemp -d)\nPIPE=$TMPDIR/$name.out\nmkfifo $PIPE\nsystemd-cat -t \"$name\" < $PIPE &\nexec 3>$PIPE\n\ndonotify=0\nwhich notify-send &> /dev/null\nif [[ $? -ne 0 ]]; then\n    donotify=1\nfi\n\nerror() { \n    printf \"==> ERROR: %s\\n\" \"$@\"\n    notify_error 'Error' 'Check journal for more information.'\n} >&2\n\ndie() { \n    error \"$@\"\n    exit 1\n}\n\ntraperror() {\n    printf \"Exited due to error on line %s.\\n\" $1\n    printf \"exit status: %s\\n\" \"$2\"\n    printf \"command: %s\\n\" \"$3\"\n    printf \"bash line: %s\\n\" \"$4\"\n    printf \"function name: %s\\n\" \"$5\"\n    exit 1\n}\n\ntrapkill() { \n    die \"Exited due to user intervention.\" \n}\n\ntrap 'traperror ${LINENO} $? \"$BASH_COMMAND\" $BASH_LINENO \"${FUNCNAME[@]}\"' ERR\ntrap trapkill SIGTERM SIGINT\n\nusage() {\n  cat <<EOF\n$name $version\nUsage: $name [options]\n\nOptions:\n -c, --config <config>    snapper configuration to backup\n -d, --description <desc> snapper description\n -h, --help               print this message\n -n, --noconfirm          do not ask for confirmation\n -q, --quiet              do not send notifications; instead print them.\n -s, --subvolid <subvlid> subvolume id of the mounted BTRFS subvolume to back up to\n -u, --UUID <UUID>        UUID of the mounted BTRFS subvolume to back up to\n\nSee 'man snap-sync' for more details.\nEOF\n}\n\nwhile [[ $# -gt 0 ]]; do\n    key=\"$1\"\n    case $key in\n        -d|--description)\n            description=\"$2\"\n            shift 2\n        ;;\n        -c|--config)\n            selected_configs=\"$2\"\n            shift 2\n        ;;\n        -u|--UUID)\n            uuid_cmdline=\"$2\"\n            shift 2\n        ;;\n        -s|--subvolid)\n            subvolid_cmdline=\"$2\"\n            shift 2\n        ;;\n        -n|--noconfirm)\n            noconfirm=\"yes\"\n            shift\n        ;;\n        -h|--help)\n            usage\n            exit 1\n        ;;\n        -q|--quiet)\n            donotify=1\n            shift\n        ;;\n        *)\n            die \"Unknown option: '$key'. Run '$name -h' for valid options.\"\n        ;;\n    esac\ndone\n\nnotify() {\n    for u in $(users | tr ' ' '\\n' | sort -u); do\n        sudo -u $u DISPLAY=:0 \\\n        DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/$(sudo -u $u id -u)/bus \\\n        notify-send -a $name \"$1\" \"$2\" --icon=\"dialog-$3\"\n    done\n}\n\nnotify_info() {\n    if [[ $donotify -eq 0 ]]; then\n        notify \"$1\" \"$2\" \"information\"\n    else\n        printf \"$1: $2\\n\"\n    fi\n}\n\nnotify_error() {\n    if [[ $donotify -eq 0 ]]; then\n        notify \"$1\" \"$2\" \"error\"\n    else\n        printf \"$1: $2\\n\"\n    fi\n}\n\n[[ $EUID -ne 0 ]] && die \"Script must be run as root. See '$name -h' for a description of options\" \n! [[ -f $SNAPPER_CONFIG ]] && die \"$SNAPPER_CONFIG does not exist.\"\n\ndescription=${description:-\"latest incremental backup\"}\nuuid_cmdline=${uuid_cmdline:-\"none\"}\nsubvolid_cmdline=${subvolid_cmdline:-\"5\"}\nnoconfirm=${noconfirm:-\"no\"}\n\nif [[ \"$uuid_cmdline\" != \"none\" ]]; then\n    notify_info \"Backup started\" \"Starting backups to $uuid_cmdline subvolid=$subvolid_cmdline...\"\nelse\n    notify_info \"Backup started\" \"Starting backups. 
Use command line menu to select disk.\"\nfi\n\nif [[ \"$(findmnt -n -v --target / -o FSTYPE)\" == \"btrfs\" ]]; then\n    EXCLUDE_UUID=$(findmnt -n -v -t btrfs --target / -o UUID)\n    TARGETS=$(findmnt -n -v -t btrfs -o UUID,TARGET --list | grep -v $EXCLUDE_UUID | awk '{print $2}')\n    UUIDS=$(findmnt -n -v -t btrfs -o UUID,TARGET --list | grep -v $EXCLUDE_UUID | awk '{print $1}')\nelse\n    TARGETS=$(findmnt -n -v -t btrfs -o TARGET --list)\n    UUIDS=$(findmnt -n -v -t btrfs -o UUID --list)\nfi\n\ndeclare -a TARGETS_ARRAY\ndeclare -a UUIDS_ARRAY\ndeclare -a SUBVOLIDS_ARRAY\n\ni=0\nfor x in $TARGETS; do\n    SUBVOLIDS_ARRAY[$i]=$(btrfs subvolume show $x | awk '/Subvolume ID:/ { print $3 }')\n    TARGETS_ARRAY[$i]=$x\n    i=$((i+1))\ndone\n\ni=0\ndisk=-1\ndisk_count=0\nfor x in $UUIDS; do\n    UUIDS_ARRAY[$i]=$x\n    if [[ \"$x\" == \"$uuid_cmdline\" && ${SUBVOLIDS_ARRAY[$((i))]} == \"$subvolid_cmdline\" ]]; then\n        disk=$i\n        disk_count=$(($disk_count+1))\n    fi\n    i=$((i+1))\ndone\n\nif [[ \"${#UUIDS_ARRAY[$@]}\" -eq 0 ]]; then\n    die \"No external btrfs subvolumes found to backup to. Run '$name -h' for more options.\"\nfi\n\nif [[ \"$disk_count\" > 1 ]]; then\n    printf \"Multiple mount points were found with UUID %s and subvolid %s.\\n\" \"$uuid_cmdline\" \"$subvolid_cmdline\"\n    disk=\"-1\"\nfi\n\nif [[ \"$disk\" == -1 ]]; then\n    if [[ \"$disk_count\" == 0 && \"$uuid_cmdline\" != \"none\" ]]; then\n        error \"A device with UUID $uuid_cmdline and subvolid $subvolid_cmdline was not found to be mounted, or it is not a BTRFS device.\"\n    fi\n    printf \"Select a mounted BTRFS device on your local machine to backup to.\\nFor more options, exit and run '$name -h'.\\n\"\n    while [[ $disk -lt 0 || $disk -gt $i ]]; do\n        for x in \"${!TARGETS_ARRAY[@]}\"; do\n            printf \"%4s) %s (uuid=%s, subvolid=%s)\\n\" \"$((x+1))\" \"${TARGETS_ARRAY[$x]}\" \"${UUIDS_ARRAY[$x]}\" \"${SUBVOLIDS_ARRAY[$x]}\"\n        done\n        printf \"%4s) Exit\\n\" \"0\"\n        read -e -r -p \"Enter a number: \" disk\n        if ! [[ $disk == ?(-)+([0-9]) ]] || [[ $disk -lt 0 || $disk -gt $i ]]; then\n            printf \"\\nNo disk selected. Select a disk to continue.\\n\"\n            disk=-1\n        fi\n    done\n    if [[ $disk == 0 ]]; then\n        exit 0\n    fi\n    disk=$(($disk-1))\nfi\n\nselected_subvolid=\"${SUBVOLIDS_ARRAY[$((disk))]}\"\nselected_uuid=\"${UUIDS_ARRAY[$((disk))]}\"\nselected_mnt=\"${TARGETS_ARRAY[$((disk))]}\"\nprintf \"\\nYou selected the disk with uuid=%s, subvolid=%s.\\n\" \"$selected_uuid\" \"$selected_subvolid\" | tee $PIPE\nprintf \"The disk is mounted at '%s'.\\n\" \"$selected_mnt\" | tee $PIPE\n\nsource $SNAPPER_CONFIG\n\nif [[ -z $selected_configs ]]; then\n    printf \"\\nInteractively cycling through all snapper configurations...\\n\"\nfi\nselected_configs=${selected_configs:-$SNAPPER_CONFIGS}\n\ndeclare -a BACKUPDIRS_ARRAY\ndeclare -a MYBACKUPDIR_ARRAY\ndeclare -a OLD_NUM_ARRAY\ndeclare -a OLD_SNAP_ARRAY\ndeclare -a NEW_NUM_ARRAY\ndeclare -a NEW_SNAP_ARRAY\ndeclare -a NEW_INFO_ARRAY\ndeclare -a BACKUPLOC_ARRAY\ndeclare -a CONT_BACKUP_ARRAY\n\n# Initial configuration of where backup directories are\ni=0\nfor x in $selected_configs; do\n\n    if [[ \"$(snapper -c $x list -t single | awk '/'\"subvolid=$selected_subvolid, uuid=$selected_uuid\"'/ {cnt++} END {print cnt}')\" -gt 1 ]]; then\n        error \"More than one snapper entry found with UUID $selected_uuid subvolid $selected_subvolid for configuration $x. 
Skipping configuration $x.\"\n        continue\n    fi\n\n    if [[ \"$(snapper -c $x list -t single | awk '/'$name' backup in progress/ {cnt++} END {print cnt}')\" -gt 0 ]]; then\n        printf \"\\nNOTE: Previous failed %s backup snapshots found for '%s'.\\n\" \"$name\" \"$x\" | tee $PIPE\n        if [[ $noconfirm == \"yes\" ]]; then\n            printf \"'noconfirm' option passed. Failed backups will not be deleted.\\n\" | tee $PIPE\n        else\n            read -e -r -p \"Delete failed backup snapshot(s)? (These local snapshots from failed backups are not used.) [y/N]? \" delete_failed\n            while [[ -n \"$delete_failed\" && \"$delete_failed\" != [Yy]\"es\" &&\n                \"$delete_failed\" != [Yy] && \"$delete_failed\" != [Nn]\"o\" &&\n                \"$delete_failed\" != [Nn] ]]; do\n                read -e -r -p \"Delete failed backup snapshot(s)? (These local snapshots from failed backups are not used.) [y/N] \" delete_failed\n                if [[ -n \"$delete_failed\" && \"$delete_failed\" != [Yy]\"es\" &&\n                \"$delete_failed\" != [Yy] && \"$delete_failed\" != [Nn]\"o\" &&\n                \"$delete_failed\" != [Nn] ]]; then\n                    printf \"Select 'y' or 'N'.\\n\"\n                fi\n            done\n            if [[ \"$delete_failed\" == [Yy]\"es\" || \"$delete_failed\" == [Yy] ]]; then\n                snapper -c $x delete $(snapper -c $x list | awk '/'$name' backup in progress/ {print $1}')\n            fi\n        fi\n    fi\n\n    SNAP_SYNC_EXCLUDE=no\n\n    if [[ -f \"/etc/snapper/configs/$x\" ]]; then\n        source /etc/snapper/configs/$x\n        # TODO: snapper -c \"$x\" --jsonout get-config\n    else\n        die \"Selected snapper configuration $x does not exist.\"\n    fi\n\n    if [[ $SNAP_SYNC_EXCLUDE == \"yes\" ]]; then \n        continue\n    fi\n\n    printf \"\\n\"\n\n    old_num=$(snapper -c \"$x\" list -t single | awk '/'\"subvolid=$selected_subvolid, uuid=$selected_uuid\"'/ {print $1}')\n    old_snap=$SUBVOLUME/.snapshots/$old_num/snapshot\n\n    OLD_NUM_ARRAY[$i]=$old_num\n    OLD_SNAP_ARRAY[$i]=$old_snap\n\n    if [[ -z \"$old_num\" ]]; then\n        printf \"No backups have been performed for '%s' on this disk.\\n\" \"$x\"\n        read -e -r -p \"Enter name of subvolume to store backups, relative to $selected_mnt (to be created if not existing): \" mybackupdir\n        printf \"This will be the initial backup for snapper configuration '%s' to this disk. 
This could take a while.\\n\" \"$x\"\n        BACKUPDIR=\"$selected_mnt/$mybackupdir\"\n        test -d \"$BACKUPDIR\" || btrfs subvolume create \"$BACKUPDIR\"\n    else\n        mybackupdir=$(snapper -c \"$x\" list -t single | awk -F\"|\" '/'\"subvolid=$selected_subvolid, uuid=$selected_uuid\"'/ {print $5}' | awk -F \",\" '/backupdir/ {print $1}' | awk -F\"=\" '{print $2}')\n        BACKUPDIR=\"$selected_mnt/$mybackupdir\"\n        test -d \"$BACKUPDIR\" || die \"$BACKUPDIR is not a directory on $selected_uuid.\"\n    fi\n    BACKUPDIRS_ARRAY[$i]=\"$BACKUPDIR\"\n    MYBACKUPDIR_ARRAY[$i]=\"$mybackupdir\"\n\n    printf \"Creating new local snapshot for '%s' configuration...\\n\" \"$x\" | tee $PIPE\n#    new_num=$(snapper -c \"$x\" create --print-number -d \"$name backup in progress\")\n    new_num=TODO\n    new_snap=$SUBVOLUME/.snapshots/$new_num/snapshot\n    new_info=$SUBVOLUME/.snapshots/$new_num/info.xml\n    sync\n    backup_location=$BACKUPDIR/$x/$new_num/\n    printf \"Will back up %s to %s\\n\" \"$new_snap\" \"$backup_location/snapshot\" | tee $PIPE\n\n    if (test -d \"$backup_location/snapshot\") ; then\n        printf \"WARNING: Backup directory '%s' already exists. This configuration will be skipped!\\n\" \"$backup_location/snapshot\" | tee $PIPE\n        printf \"Move or delete destination directory and try backup again.\\n\" | tee $PIPE\n    fi\n\n    NEW_NUM_ARRAY[$i]=\"$new_num\"\n    NEW_SNAP_ARRAY[$i]=\"$new_snap\"\n    NEW_INFO_ARRAY[$i]=\"$new_info\"\n    BACKUPLOC_ARRAY[$i]=\"$backup_location\"\n\n    cont_backup=\"K\"   # dummy value that fails the checks below, forcing the prompt loop\n    CONT_BACKUP_ARRAY[$i]=\"yes\"\n    if [[ $noconfirm == \"yes\" ]]; then\n        cont_backup=\"yes\"\n    else\n        while [[ -n \"$cont_backup\" && \"$cont_backup\" != [Yy]\"es\" &&\n            \"$cont_backup\" != [Yy] && \"$cont_backup\" != [Nn]\"o\" &&\n            \"$cont_backup\" != [Nn] ]]; do\n            read -e -r -p \"Proceed with backup of '$x' configuration [Y/n]? 
\" cont_backup\n            if [[ -n \"$cont_backup\" && \"$cont_backup\" != [Yy]\"es\" &&\n            \"$cont_backup\" != [Yy] && \"$cont_backup\" != [Nn]\"o\" &&\n            \"$cont_backup\" != [Nn] ]]; then\n                printf \"Select 'Y' or 'n'.\\n\"\n            fi\n        done\n    fi\n\n    if [[ \"$cont_backup\" != [Yy]\"es\" && \"$cont_backup\" != [Yy] && -n \"$cont_backup\" ]]; then\n        CONT_BACKUP_ARRAY[$i]=\"no\"\n        printf \"Not backing up '%s' configuration.\\n\" $x\n#        snapper -c $x delete $new_num\n    fi\n\n    i=$(($i+1))\n\ndone\n\n# Actual backing up\nprintf \"\\nPerforming backups...\\n\" | tee $PIPE\ni=-1\nfor x in $selected_configs; do\n\n    i=$(($i+1))\n\n    SNAP_SYNC_EXCLUDE=no\n\n    if [[ -f \"/etc/snapper/configs/$x\" ]]; then\n        source /etc/snapper/configs/$x\n    else\n        die \"Selected snapper configuration $x does not exist.\"\n    fi\n\n    cont_backup=${CONT_BACKUP_ARRAY[$i]}\n    if [[ $cont_backup == \"no\" || $SNAP_SYNC_EXCLUDE == \"yes\" ]]; then \n        notify_info \"Backup in progress\" \"NOTE: Skipping $x configuration.\"\n        continue\n    fi\n\n    notify_info \"Backup in progress\" \"Backing up $x configuration.\"\n\n    printf \"\\n\"\n\n    old_num=\"${OLD_NUM_ARRAY[$i]}\"\n    old_snap=\"${OLD_SNAP_ARRAY[$i]}\"\n    BACKUPDIR=\"${BACKUPDIRS_ARRAY[$i]}\"\n    mybackupdir=\"${MYBACKUPDIR_ARRAY[$i]}\"\n    new_num=\"${NEW_NUM_ARRAY[$i]}\"\n    new_snap=\"${NEW_SNAP_ARRAY[$i]}\"\n    new_info=\"${NEW_INFO_ARRAY[$i]}\"\n    backup_location=\"${BACKUPLOC_ARRAY[$i]}\"\n\n    if (test -d \"$backup_location/snapshot\") ; then\n        printf \"ERROR: Backup directory '%s' already exists. Skipping backup of this configuration!\\n\" \"$backup_location/snapshot\" | tee $PIPE\n        continue\n    fi\n\n    mkdir -p $backup_location\n\n    if [[ -z \"$old_num\" ]]; then\n        printf \"Sending first snapshot for '%s' configuration...\\n\" \"$x\" | tee $PIPE  \n#        btrfs send \"$new_snap\" | btrfs receive \"$backup_location\" &>/dev/null\n    else\n\n        printf \"Sending incremental snapshot for '%s' configuration...\\n\" \"$x\" | tee $PIPE  \n        # Sends the difference between the new snapshot and old snapshot to the\n        # backup location. Using the -c flag instead of -p tells it that there\n        # is an identical subvolume to the old snapshot at the receiving\n        # location where it can get its data. 
This helps speed up the transfer.\n\n#        btrfs send -c \"$old_snap\" \"$new_snap\" | btrfs receive \"$backup_location\"\n\n#        printf \"Modifying data for old local snapshot for '%s' configuration...\\n\" \"$x\" | tee $PIPE\n#        snapper -v -c \"$x\" modify -d \"old snap-sync snapshot (you may remove)\" -u \"backupdir=,subvolid=,uuid=\" -c \"number\" \"$old_num\"\n\n    fi\n\n    cp \"$new_info\" \"$backup_location\"\n\n    # It's important not to change this userdata in the snapshots, since that's how\n    # we find the previous one.\n\n#    userdata=\"backupdir=$mybackupdir, subvolid=$selected_subvolid, uuid=$selected_uuid\"\n\n    # Tag new snapshot as the latest\n#    printf \"Tagging local snapshot as latest backup for '%s' configuration...\\n\" \"$x\" | tee $PIPE\n#    snapper -v -c \"$x\" modify -d \"$description\" -u \"$userdata\" \"$new_num\"\n\n    printf \"Backup complete for '%s' configuration.\\n\" \"$x\" > $PIPE\n\ndone\n\nprintf \"\\nDone!\\n\" | tee $PIPE\nexec 3>&-\n\nif [[ \"$uuid_cmdline\" != \"none\" ]]; then\n    notify_info \"Finished\" \"Backups to $uuid_cmdline complete!\"\nelse\n    notify_info \"Finished\" \"Backups complete!\"\nfi\n"
  },
  {
    "path": "clean-aur-dir.py",
    "content": "#! /usr/bin/env python\n\nimport os\nimport sys\nimport re\nimport subprocess\n\npkgname_regex = re.compile(\"^(?P<pkgname>[a-z0-9@._+-]+)-(?P<pkgver>[a-z0-9._:-]+)-(?P<arch>any|x86_64|i686)\\.pkg\\.tar(\\.xz)?(\\.sig)?$\", re.IGNORECASE)\n\ndef usage():\n    print(\"Simple utility to clean directories from old Arch's package files, keeping only those currently installed\")\n    print(\"usage: %s PATH\" % sys.argv[0])\n    sys.exit(1)\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 2:\n        usage()\n\n    path = sys.argv[1]\n    if not os.path.isdir(path):\n        usage()\n    os.chdir(path)\n\n    files = {}\n\n    # remove files that don't match pkgname_reges from further processing!!\n    for f in os.listdir():\n        if not os.path.isfile(f):\n            continue\n        match = re.match(pkgname_regex, f)\n        if match:\n            # strip extension for future comparison with expac's output\n            files[f] = \"{pkgname}-{pkgver}-{arch}\".format(**match.groupdict())\n\n    # get list of installed packages\n    installed = subprocess.check_output(\"expac -Qs '%n-%v-%a'\", shell=True, universal_newlines=True).splitlines()\n\n    for f in sorted(files):\n        # compare with the key instead of the whole filename\n        # (drops file extensions like .pkg.tar.{xz,gz}{,.sig} )\n        ff = files[f]\n\n        if ff in installed:\n            print(\"Kept:    %s\" % f)\n        else:\n            print(\"Deleted: %s\" % f)\n            os.remove(f)\n"
  },
  {
    "path": "convertToUtf8.py",
    "content": "#! /usr/bin/env python\n\nimport sys\nimport os\nimport traceback\n\nCHARSETS = (\"ascii\", \"cp1250\", \"cp1252\", \"iso-8859-9\", \"iso-8859-15\")\n\ndef is_utf8(filepath):\n    try:\n        file = open(filepath, \"rb\")\n        file.read().decode('utf-8')\n        file.close()\n        return True\n    except:\n        return False\n    \ndef to_utf8(path):\n    for charset in CHARSETS:\n        try:\n            f = open(path, 'rb')\n            content = f.read().decode(charset)\n            f.close()\n            f = open(path, 'wb')\n            f.write(content.encode('utf-8'))\n            f.close()\n            return \"Converting to utf-8: \" + os.path.split(path)[1]\n        except:\n            pass\n    return \"Unable to open \" + os.path.split(path)[1] + \" - unknown charset or binary file.\"\n\ndef run():\n    message = \"\"\n    for filename in sys.argv[1:]:\n        if os.path.isfile(filename):\n            if is_utf8(filename):\n                message += os.path.split(filename)[1] + \" is already in utf-8.\\n\"\n            else:\n                message += to_utf8(filename) + \"\\n\"\n    return message.strip()\n    \nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        print(\"Usage: \" + sys.argv[0] + \" file1 [file2 ...]\")\n        sys.exit(1)\n        \n    try:\n        message = run()\n    except:\n        message = traceback.format_exc()\n    if message != \"\":\n        print(message)\n"
  },
  {
    "path": "fatcp",
    "content": "#! /usr/bin/bash\n\n# Script for safe copying to FAT32 filesystems.\n# All bad characters are replaced by '_' (underscore) when copying.\n# File conflicts (e.g. 'foo?' and 'foo:' are both mapped to 'foo_') are not checked - using 'cp -i' is recommended.\n\n# Some resources:\n# http://askubuntu.com/questions/11634/how-can-i-substitute-colons-when-i-rsync-on-a-usb-key\n#\n# Simple (stupid) alternative:\n# find -type f -name '*.pat' -print0  | tar -c -f - --null --files-from - | tar -C /path/to/dst -v -x -f - --show-transformed --transform 's/?/_/g'\n#\n\n# two arguments are accepted\nif [[ $# -ne 2 ]]; then\n    echo \"Usage: $0 <src path> <dst path>\"\n    exit 1\nfi\n\nbase=$(realpath \"$1\")\nbasedir=$(dirname \"$base\")\ndst=$(realpath \"$2\")\n\n# $dst must be existing dir\nif [[ ! -d \"$dst\" ]]; then\n    echo \"Target directory '$dst' does not exist.\"\n    exit 1\nfi\n\n# 'cp' alias\nCP=\"cp -i --preserve=all\"\n# characters that will be replaced with '_'\nBADCHARS='<>|;:!?\"*\\+'\n\n# enhance globbing\nshopt -s dotglob globstar\n\n# function creating target file/dir name\nmk_target() {\n    local target=${1#\"$basedir\"}\n    echo \"$dst/${target//[$BADCHARS]/_}\"\n}\n\n# dirs and files are handled differently\nif [[ -d \"$base\" ]]; then\n    target=$(mk_target \"$base\")\n    mkdir \"$target\"\n    for src in \"$base\"/**/*; do\n        target=$(mk_target \"$src\")\n        if [[ -d \"$src\" ]]; then\n            mkdir -p -- \"$target\"\n        elif [[ \"$src\" != \"$target\" ]]; then\n            $CP -- \"$src\" \"$target\"\n        fi\n    done\nelif [[ -f \"$base\" ]]; then\n    target=$(mk_target \"$base\")\n    if [[ \"$src\" != \"$target\" ]]; then\n        $CP -- \"$base\" \"$target\"\n    fi\nfi\n"
  },
  {
    "path": "ffparser.py",
    "content": "#! /usr/bin/env python\n\nimport argparse\n\nfrom pythonscripts.ffparser import FFprobeParser\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"parse ffprobe's json output\")\n\n    option = parser.add_mutually_exclusive_group(required=True)\n    option.add_argument(\"-a\", \"--audio\", action=\"store_const\", const=\"audio\", dest=\"option\", help=\"get audio attribute\")\n    option.add_argument(\"-v\", \"--video\", action=\"store_const\", const=\"video\", dest=\"option\", help=\"get video attribute\")\n    option.add_argument(\"-f\", \"--format\", action=\"store_const\", const=\"format\", dest=\"option\", help=\"get format attribute\")\n\n    action = parser.add_mutually_exclusive_group(required=True)\n    action.add_argument(\"-g\", \"--get\", action=\"store\", nargs=1, dest=\"attribute\", help=\"attribute name to get\")\n    action.add_argument(\"-p\", \"--print\", action=\"store_true\", dest=\"pprint\", help=\"print all attributes and exit\")\n\n    parser.add_argument(\"path\", action=\"store\", nargs=1, help=\"path to file to parse\")\n\n    args = parser.parse_args()\n    ffparser = FFprobeParser(args.path[0])\n    if args.pprint:\n        ffparser.pprint(args.option)\n    else:\n        print(ffparser.get(args.option, args.attribute[0]))\n"
  },
  {
    "path": "fmount.py",
    "content": "#! /usr/bin/env python3\n\nimport argparse\nimport configparser\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nCONFIG = Path.home() / \".config\" / \"fmount.conf\"\nDEFAULT_MOUNTPATH = Path.home() / \"mnt\"\n\n\n# we just strip spaces in the mntopts string\ndef reformat_mntopts(mntopts):\n    mntopts = mntopts.split(\",\")\n    options = []\n    for opt in mntopts:\n        options.append(\"=\".join(tk.strip() for tk in opt.split(\"=\")))\n    return \",\".join(set(options))\n\n\ndef mount_gio(*, scheme: str, host: str, path: str, user: str, port: str, mountpoint: Path):\n    if mountpoint.exists() and not mountpoint.is_symlink():\n        print(f\"Error: path {mountpoint} exists but is not a symlink\", file=sys.stderr)\n        return\n\n    location = f\"{scheme}://\"\n    if user:\n        location += user + \"@\"\n    location += host\n    if port:\n        location += \":\" + port\n    location += \"/\" + path\n\n    # get path to thet gvfs directory\n    XDG_RUNTIME_DIR = os.environ.get(\"XDG_RUNTIME_DIR\")\n    if XDG_RUNTIME_DIR is None:\n        XDG_RUNTIME_DIR = f\"/run/user/{os.getuid()}\"\n    gvfs = Path(XDG_RUNTIME_DIR) / \"gvfs\"\n\n    # save current gvfs mounts\n    if gvfs.is_dir():\n        mounts_before = set(gvfs.glob(f\"{scheme}-share:*\"))\n    else:\n        mounts_before = set()\n\n    print(f\"Mounting {location}\")\n    cmd = [\"gio\", \"mount\", location]\n    subprocess.run(cmd, check=True)\n\n    if not gvfs.is_dir():\n        print(f\"Error: gvfs directory {gvfs} does not exist\", file=sys.stderr)\n        return\n\n    # detect the new gvfs mount symlink it to mountpoint\n    mounts_after = set(gvfs.glob(f\"{scheme}-share:*\"))\n    target = list(mounts_after - mounts_before)[0]\n\n    # hack for inaccessible parents of the path on smb servers\n    if scheme == \"smb\":\n        _path = Path(path.lstrip(\"/\"))\n        # the first part is the remote share, the rest is the location we want\n        target /= _path.relative_to(_path.parts[0])\n\n    # create a symlink from mountpoint to gvfs target\n    mountpoint.symlink_to(target)\n\n\ndef mount_sshfs(*, host: str, path: str, user: str, port: str, mountpoint: Path, mntopts: str):\n    uhd = host + \":\" + path\n    if user:\n        uhd = user + \"@\" + uhd\n\n    cmd = [\"sshfs\", uhd, str(mountpoint)]\n    if mntopts:\n        cmd += [\"-o\", mntopts]\n    if port:\n        cmd += [\"-p\", port]\n\n    print(f\"Mounting at '{mountpoint}'...\")\n    # the mountpoint might exist after an error or automatic unmount\n    mountpoint.mkdir(parents=True, exist_ok=True)\n    subprocess.run(cmd, check=True)\n\n\ndef mount(name, mountpath: Path, config):\n    mountpoint = mountpath / name\n    scheme = config.get(name, \"scheme\", fallback=\"sshfs\")\n    host = config.get(name, \"host\", fallback=name)\n    path = config.get(name, \"path\", fallback=\"\")\n    user = config.get(name, \"user\", fallback=None)\n    port = config.get(name, \"port\", fallback=None)\n    mntopts = config.get(name, \"mntopts\", fallback=\"\")\n    mntopts = reformat_mntopts(mntopts)\n\n    if scheme == \"sshfs\":\n        # sshfs is *much* faster than gvfs\n        return mount_sshfs(\n            host=host,\n            path=path,\n            user=user,\n            port=port,\n            mountpoint=mountpoint,\n            mntopts=mntopts,\n        )\n    else:\n        return mount_gio(\n            scheme=scheme,\n            host=host,\n            path=path,\n            user=user,\n       
     port=port,\n            mountpoint=mountpoint,\n        )\n\n\ndef umount(path: Path):\n    if path.is_mount():\n        cmd = [\"fusermount3\", \"-u\", str(path)]\n        subprocess.run(cmd, check=True)\n        clean(path)\n    elif path.is_symlink():\n        if path.readlink().exists():\n            cmd = [\"gio\", \"mount\", \"--unmount\", str(path.resolve())]\n            subprocess.run(cmd, check=True)\n        # do not call clean(path), gio takes a while to remove the target\n        path.unlink()\n    elif path.is_dir():\n        print(f\"Note: directory '{path}' is not a mount point.\", file=sys.stderr)\n        return\n\n\ndef clean(path: Path):\n    if path.is_symlink() and not path.readlink().exists():\n        print(f\"Removing broken symlink '{path}'...\")\n        path.unlink()\n    else:\n        if not path.is_mount() and not any(path.iterdir()):\n            print(f\"Removing empty mountpoint '{path}'...\")\n            path.rmdir()\n\n\ndef cleanAll(mountpath):\n    # Path.iterdir() already yields full paths\n    for path in mountpath.iterdir():\n        if path.is_dir():\n            clean(path)\n\n\ndef writeDefaultConfig():\n    with open(CONFIG, mode=\"w\", encoding=\"utf-8\") as cfile:\n        print(\n            f\"\"\"\\\n# globals live in the DEFAULT section\n[DEFAULT]\nmountpath = {DEFAULT_MOUNTPATH}\n#mntopts = opt1=val1, opt2=val2, ... # optional\n\n#[remote_name]\n#scheme = ... # optional, either sshfs (default) or anything else supported by gvfs\n#host = ... # optional, equal to remote_name by default\n#path = ... # optional, sshfs defaults to remote $HOME\n#user = ... # optional, .ssh/config is honoured\n#port = ... # optional, .ssh/config is honoured\n#mntopts = opt1=val1, opt2=val2, ... # optional\n\"\"\",\n            file=cfile,\n        )\n\n\nif __name__ == \"__main__\":\n    config = configparser.ConfigParser()\n    if not CONFIG.exists():\n        writeDefaultConfig()\n    config.read(CONFIG)\n\n    parser = argparse.ArgumentParser(\n        description=\"wrapper for sshfs and gio with a config file\"\n    )\n    parser.add_argument(\n        \"--list-available\",\n        action=\"store_true\",\n        help=\"list the hosts defined in the configuration file and exit\",\n    )\n    parser.add_argument(\n        \"--list-mounted\",\n        action=\"store_true\",\n        help=\"list the currently mounted hosts and exit\",\n    )\n    parser.add_argument(\n        \"-u\", \"--unmount\", action=\"store_true\", help=\"unmount given host or path\"\n    )\n    parser.add_argument(\n        \"host\", nargs=\"*\", help=\"remote name(s) specified in the config file\"\n    )\n    args = parser.parse_args()\n\n    mountpath = Path(\n        os.path.expanduser(\n            config.get(\"DEFAULT\", \"mountpath\", fallback=DEFAULT_MOUNTPATH)\n        )\n    )\n\n    if args.list_available:\n        hosts = set(key for key in config.keys() if key != \"DEFAULT\")\n        for host in sorted(hosts):\n            print(host)\n\n    elif args.list_mounted:\n        for file in sorted(mountpath.iterdir()):\n            print(file.name)\n\n    else:\n        if args.host:\n            for host in args.host:\n                if args.unmount:\n                    if Path(host).is_dir():\n                        # not a host, but a path\n                        path = Path(host)\n                    else:\n                        path = mountpath / host\n                        if not path.exists():\n                            print(\n                           
     f\"Note: path '{path}' does not exist.\", file=sys.stderr\n                            )\n                    umount(path)\n                else:\n                    if config.has_section(host):\n                        if (mountpath / host).is_mount():\n                            parser.error(f\"Host '{host}' is already mounted.\")\n                        mount(host, mountpath, config)\n                    else:\n                        parser.error(\n                            f\"Section '{host}' does not exist in the config file.\"\n                        )\n        else:\n            parser.error(\"No hosts were given.\")\n        cleanAll(mountpath)\n"
  },
  {
    "path": "forcemp3convert.sh",
    "content": "#! /bin/bash\n\n# forcefully convert any file to mp3 (with fixed bitrate), preserving metadata (if possible)\n\nset -e\n\nfor file in \"$@\"; do\n    tmpfile=\"$(mktemp -u)-forcemp3convert.mp3\"\n    ffmpeg -i \"$file\" -acodec libmp3lame -ar 44100 -ab 128k -ac 2 -f mp3 -map_metadata 0 -y \"$tmpfile\"\n    mv \"$tmpfile\" \"${file%\\.*}.mp3\"\ndone\n"
  },
  {
    "path": "hddtemp.sh",
    "content": "#!/bin/bash\n\ndevices=\"$@\"\ndevices=${devices:-/dev/sda}\n\nfor device in $devices; do\n    cmd=\"smartctl -d ata -a $device | grep \\\"Temperature_Celsius\\\" | awk '{print \\$10}'\"\n\n    if [[ $UID != 0 ]]; then\n        echo \"Running \\`sudo $cmd\\`\"\n        temp=$(eval \"sudo $cmd\")\n    else\n        echo \"Running \\`$cmd\\`\"\n        temp=$(eval \"$cmd\")\n    fi\n\n    echo \"Temperature of $device: $temp°C\"\ndone\n"
  },
  {
    "path": "imap-notifier.py",
    "content": "#!/usr/bin/env python3\n\nimport asyncio\nimport email.header\nimport email.parser\nimport imaplib\nimport json\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nimport jsonschema\nimport yaml\n\nlogger = logging.getLogger(__name__)\n\n# Define the JSON schema for the configuration file\nconfig_schema = {\n    \"type\": \"object\",\n    \"required\": [\"accounts\"],\n    \"properties\": {\n        \"accounts\": {\n            \"type\": \"array\",\n            \"minItems\": 1,\n            \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                    \"username\",\n                    \"hostname\",\n                    \"password_command\",\n                ],\n                \"properties\": {\n                    \"username\": {\n                        \"type\": \"string\",\n                        \"minLength\": 1,\n                    },\n                    \"protocol\": {\n                        \"type\": \"string\",\n                        \"enum\": [\"imaps\", \"imap\"],\n                    },\n                    \"hostname\": {\n                        \"type\": \"string\",\n                        \"format\": \"hostname\",\n                    },\n                    \"port\": {\n                        \"type\": \"integer\",\n                        \"minimum\": 1,\n                        \"maximum\": 65535,\n                    },\n                    \"password_command\": {\n                        \"type\": \"string\",\n                        \"minLength\": 1,\n                    },\n                    \"include_mailboxes\": {\n                        \"type\": \"array\",\n                        \"items\": {\"type\": \"string\"},\n                        \"minItems\": 1,\n                        \"uniqueItems\": True,\n                    },\n                    \"exclude_mailboxes\": {\n                        \"type\": \"array\",\n                        \"items\": {\"type\": \"string\"},\n                        \"minItems\": 1,\n                        \"uniqueItems\": True,\n                    },\n                },\n                \"additionalProperties\": False,\n                \"allOf\": [\n                    {\"not\": {\"required\": [\"include_mailboxes\", \"exclude_mailboxes\"]}}\n                ],\n            },\n        },\n        \"timeout\": {\n            \"type\": \"integer\",\n            \"minimum\": 30,\n            \"maximum\": 3600,\n        },\n    },\n    \"additionalProperties\": False,\n}\n\n\ndef load_config(config_path: Path):\n    \"\"\"Load configuration from XDG_CONFIG_HOME/imap-notifier.yaml\"\"\"\n    try:\n        with open(config_path, \"r\") as f:\n            config = yaml.safe_load(f)\n        # Validate the configuration against the schema\n        jsonschema.validate(instance=config, schema=config_schema)\n        return config\n    except FileNotFoundError:\n        logger.error(f\"Configuration file not found: {config_path}\")\n        return None\n    except yaml.YAMLError as e:\n        logger.error(f\"Error parsing configuration file: {e}\")\n        return None\n    except jsonschema.ValidationError as e:\n        logger.error(f\"Invalid configuration: {e}\")\n        return None\n\n\n# helper function to decode MIME-encoded headers\n# https://docs.python.org/3/library/email.header.html#email.header.decode_header\ndef decode_header(header):\n    if header is None:\n        return None\n    parts = 
email.header.decode_header(header)\n    decoded = \"\"\n    for s, charset in parts:\n        if isinstance(s, str):\n            # already str - just append\n            decoded += s\n        else:\n            # byte string - needs to be decoded\n            if charset is None:\n                charset = \"ascii\"\n            decoded += str(s, encoding=charset)\n    return decoded\n\n\nclass IMAPNotifier:\n    xdg_config_home = os.environ.get(\"XDG_CONFIG_HOME\", os.path.expanduser(\"~/.config\"))\n    config_path = Path(xdg_config_home) / \"imap-notifier.yaml\"\n\n    xdg_state_home = os.environ.get(\n        \"XDG_STATE_HOME\", os.path.expanduser(\"~/.local/state\")\n    )\n    state_file_path = Path(xdg_state_home) / \"imap-notifier\" / \"state.json\"\n\n    def __init__(self):\n        self.config = load_config(self.config_path)\n        self.state = {}\n        self.mail_connections = {}\n\n        self.shutdown_event = asyncio.Event()\n\n    def load_state(self):\n        \"\"\"Load last check times from state file\"\"\"\n        try:\n            with open(self.state_file_path, \"r\") as f:\n                self.state = json.load(f)\n        except (FileNotFoundError, json.JSONDecodeError):\n            return {}\n\n    def save_state(self):\n        \"\"\"Save last check times to state file\"\"\"\n        try:\n            self.state_file_path.parent.mkdir(parents=True, exist_ok=True)\n            with open(self.state_file_path, \"w\") as f:\n                json.dump(self.state, f)\n        except Exception as e:\n            logger.error(f\"Failed to save state: {e}\")\n\n    def get_account_id(self, account_config):\n        \"\"\"Generate a unique ID for an account\"\"\"\n        username = account_config[\"username\"]\n        protocol = account_config.get(\"protocol\", \"imaps\")\n        hostname = account_config[\"hostname\"]\n        if protocol == \"imaps\":\n            port = account_config.get(\"port\", 993)\n        else:\n            port = account_config.get(\"port\", 143)\n        return f\"{protocol}://{username}@{hostname}:{port}\"\n\n    def get_password(self, account_config):\n        \"\"\"Get password using the configured command\"\"\"\n        if \"password_command\" in account_config:\n            try:\n                result = subprocess.run(\n                    account_config[\"password_command\"],\n                    shell=True,\n                    capture_output=True,\n                    text=True,\n                    check=True,\n                )\n                return result.stdout.strip()\n            except subprocess.CalledProcessError as e:\n                logger.error(\n                    f\"Failed to get password for {account_config['username']}: {e}\"\n                )\n                return None\n        else:\n            logger.error(\n                f\"No password command configured for {account_config['username']}\"\n            )\n            return None\n\n    def send_notification(self, message):\n        \"\"\"Send desktop notification for new email\"\"\"\n        try:\n            # Extract sender and subject\n            sender = decode_header(message.get(\"From\")) or \"[Unknown Sender]\"\n            subject = decode_header(message.get(\"Subject\")) or \"[No Subject]\"\n\n            subprocess.run(\n                [\n                    \"notify-send\",\n                    \"--app-name=EmailNotification\",\n                    \"--expire-time=3000\",  # duration in ms\n                    \"--urgency=normal\",  # 
critical would be shown forever\n                    \"--icon=mail-message-new-symbolic\",\n                    \"--category=email.arrived\",\n                    \"Received new email\",\n                    f\"{sender} — {subject}\",\n                ],\n                check=True,\n            )\n            logger.info(f\"Notification sent for email from: {sender}\")\n\n        except subprocess.CalledProcessError as e:\n            logger.error(f\"Failed to send notification: {e}\")\n        except Exception as e:\n            logger.error(f\"Unexpected error sending notification: {e}\")\n\n    def is_connection_alive(self, connection, account_id):\n        \"\"\"Check if IMAP connection is still alive\"\"\"\n        try:\n            # Send a NOOP command to test the connection\n            connection.noop()\n            return True\n        except Exception:\n            logger.warning(f\"Connection for account {account_id} is not alive\")\n            return False\n\n    async def connect_to_account(self, account_config):\n        \"\"\"Establish IMAP connection for an account\"\"\"\n        protocol = account_config.get(\"protocol\", \"imaps\")\n        hostname = account_config[\"hostname\"]\n\n        try:\n            # Create connection based on whether it's secure (imaps) or not\n            if protocol == \"imaps\":\n                port = account_config.get(\"port\", 993)\n                client = imaplib.IMAP4_SSL(hostname, port)\n            else:\n                port = account_config.get(\"port\", 143)\n                client = imaplib.IMAP4(hostname, port)\n\n            # Get password\n            username = account_config[\"username\"]\n            password = self.get_password(account_config)\n            if not password:\n                logger.error(f\"No password returned for account {username!r}\")\n                return None\n\n            client.login(username, password)\n\n            logger.info(\n                f\"Connected to {protocol}://{hostname}:{port} as user {username}\"\n            )\n            return client\n\n        except Exception as e:\n            logger.error(f\"Failed to connect to {protocol}://{hostname}: {e}\")\n            return None\n\n    async def get_new_emails(self, connection, account_id, mailboxes_to_process):\n        \"\"\"Get new emails since last check\"\"\"\n        logger.debug(f\"Checking {account_id} for new emails\")\n\n        # Get previous unseen emails from the state\n        account_state = self.state.setdefault(account_id, {})\n        previous_unseen_message_ids = set(account_state.get(\"unseen_message_ids\", []))\n\n        unseen_message_ids = set()\n        new_emails = []\n\n        # Process each mailbox\n        for mailbox in mailboxes_to_process:\n            try:\n                # Remove old state data\n                # TODO: remove this after some time\n                if mailbox in account_state:\n                    del account_state[mailbox]\n\n                # Select mailbox\n                connection.select(mailbox)\n\n                # Search for unseen emails\n                status, messages = connection.search(None, \"UNSEEN\")\n\n                if status != \"OK\":\n                    logger.error(\n                        f\"Failed to search emails in mailbox {mailbox} for account {account_id}\"\n                    )\n                    continue\n\n                email_ids = messages[0].split()\n\n                # Process new emails\n                for email_id in email_ids:\n                    try:\n                        # Fetch the email headers only\n                        status, msg_data = connection.fetch(email_id, \"(RFC822.HEADER)\")\n\n                        if status == \"OK\":\n                            msg = email.parser.Parser().parsestr(\n                                msg_data[0][1].decode(\"utf-8\", errors=\"ignore\")\n                            )\n                            # Always get a Message-ID, which uniquely identifies the message.\n                            # The `email_id` obtained from IMAP is just a numeric identifier in the *mailbox*,\n                            # not in the whole account.\n                            message_id = msg.get(\"Message-ID\")\n                            unseen_message_ids.add(message_id)\n                            if message_id not in previous_unseen_message_ids:\n                                new_emails.append(msg)\n\n                    except Exception as e:\n                        logger.error(f\"Failed to fetch email {email_id}: {e}\")\n                        continue\n\n            except Exception as e:\n                logger.error(\n                    f\"Error processing mailbox {mailbox} for account {account_id}: {e}\"\n                )\n\n        # Update IDs of unseen emails in the state\n        account_state[\"unseen_message_ids\"] = sorted(unseen_message_ids)\n\n        return new_emails\n\n    async def process_mailboxes(self, account_config, account_id, connection):\n        \"\"\"Process mailboxes for an account\"\"\"\n        # Determine which mailboxes to process\n        include_mailboxes = account_config.get(\"include_mailboxes\", [])\n        exclude_mailboxes = account_config.get(\"exclude_mailboxes\", [])\n\n        if include_mailboxes and exclude_mailboxes:\n            logger.error(\n                f\"Both include_mailboxes and exclude_mailboxes are defined for account \"\n                f\"{account_id}. 
Please specify only one of them.\"\n            )\n            return\n\n        # If no mailboxes specified but exclude_mailboxes is defined,\n        # get all mailboxes from server and filter out excluded ones\n        if not include_mailboxes and exclude_mailboxes:\n            try:\n                # Get all mailboxes from server\n                status, mailbox_list = connection.list()\n                all_mailboxes = []\n                if status == \"OK\" and mailbox_list:\n                    for item in mailbox_list:\n                        # Extract mailbox name from LIST response\n                        mailbox_name = item.decode().split(' \"/\" ')[-1].strip('\"')\n                        all_mailboxes.append(mailbox_name)\n\n                # Filter out excluded mailboxes\n                mailboxes_to_process = [\n                    mb for mb in all_mailboxes if mb not in exclude_mailboxes\n                ]\n            except Exception as e:\n                logger.error(f\"Error retrieving mailboxes from server: {e}\")\n                return\n        elif include_mailboxes:\n            # Use configured mailboxes\n            mailboxes_to_process = include_mailboxes\n        else:\n            # Fallback to INBOX\n            mailboxes_to_process = [\"INBOX\"]\n\n        # Get new emails\n        emails = await self.get_new_emails(connection, account_id, mailboxes_to_process)\n\n        # Send notifications for new emails\n        for message in emails:\n            self.send_notification(message)\n\n    async def process_account(self, account_config):\n        \"\"\"Process a single account\"\"\"\n        # Generate a unique ID for the account\n        account_id = self.get_account_id(account_config)\n\n        # Check if there's an existing connection for this account\n        connection = self.mail_connections.get(account_id)\n\n        try:\n            # If no connection exists or it's closed, create a new one\n            if not connection or not self.is_connection_alive(connection, account_id):\n                connection = await self.connect_to_account(account_config)\n                if not connection:\n                    return\n                self.mail_connections[account_id] = connection\n\n            # Process mailboxes for this account\n            await self.process_mailboxes(account_config, account_id, connection)\n\n            logger.debug(f\"Finished processing account {account_id}\")\n\n        except Exception as e:\n            logger.error(f\"Error processing account {account_id}: {e}\")\n            # Remove failed connection from cache\n            if account_id in self.mail_connections:\n                del self.mail_connections[account_id]\n\n    async def run(self):\n        \"\"\"Run the notifier\"\"\"\n\n        if not self.config:\n            return False\n\n        timeout = int(self.config.get(\"timeout\", 60))\n        logger.info(f\"Starting mail notifier with timeout {timeout} seconds\")\n\n        while not self.shutdown_event.is_set():\n            # Process all accounts concurrently\n            async with asyncio.TaskGroup() as tg:\n                for account in self.config[\"accounts\"]:\n                    tg.create_task(self.process_account(account))\n\n            # Save state after each cycle\n            self.save_state()\n\n            # Wait before next check\n            await asyncio.sleep(timeout)\n\n\nasync def main_async():\n    \"\"\"Async main function\"\"\"\n\n    # Create notifier\n    notifier = 
IMAPNotifier()\n\n    # Load existing state\n    notifier.load_state()\n\n    result = True\n    try:\n        # Run the notifier\n        result = await notifier.run()\n    except KeyboardInterrupt:\n        logger.info(\"Interrupted by user\")\n    finally:\n        # Save final state\n        notifier.save_state()\n        logger.info(\"Notifier stopped\")\n\n    if result is False:\n        sys.exit(1)\n\n\ndef main():\n    \"\"\"Main function\"\"\"\n    # Create event loop and run async main\n    asyncio.run(main_async())\n\n\nif __name__ == \"__main__\":\n    # Configure logging\n    logging.basicConfig(\n        level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\"\n    )\n\n    main()\n"
  },
  {
    "path": "img2pdf.sh",
    "content": "#!/bin/bash\n\nset -e\n\noutfile=$1\next=tif\n\necho \"Converting images to pdf...\"\ndeclare -a pages\n# take input pattern \"anything_number.ext\", sort numerically by \"number\"\nfor file in $(ls ./*.$ext | sort -t_ -k2,2n); do\n    echo \"  $file\"\n    pdf=$(basename \"$file\" .$ext).pdf\n#    convert \"$file\" \"$pdf\"\n    tiff2pdf -z -F -x 300 -y 300 -o \"$pdf\"  \"$file\"\n    pages+=(\"$pdf\")\ndone\necho \"Merging into one pdf...\"\nstapler sel \"${pages[@]}\" \"$outfile\"\n"
  },
  {
    "path": "initscreen.sh",
    "content": "#! /bin/bash\n\n# exit on error\nset -e\n\n#hdmi=$(cat /sys/class/drm/card0-HDMI-A-1/status)\n#vga=$(cat /sys/class/drm/card0-VGA-1/status)\nif [[ \"$WAYLAND_DISPLAY\" == \"\" ]]; then\n    connected=$(xrandr | grep \" connected\" | sed -e \"s/\\([A-Z0-9]\\+\\) connected.*/\\1/\")\nelse\n    connected=$(swaymsg -pt get_outputs | grep -E \"^Output\" | awk '{print $2}')\nfi\n\n#echo \"initscreen.sh: hdmi $hdmi; vga $vga\"\n\nif [[ $connected =~ \"LVDS-0\" ]]; then\n    if [[ $connected =~ \"HDMI-0\" ]]; then\n        # hdmi only\n        # NOTE: i3 fails if no active output is detected, so we have to first enable second output and then disable the first\n    #    xrandr --nograb --output HDMI-0 --auto --primary\n    #    xrandr --nograb --output LVDS-0 --off\n        # both\n        # HDMI-0 is primary, LVDS-0 is panned to be vertically aligned to the bottom\n    #    xrandr --nograb --output HDMI-0 --auto --primary --output LVDS-0 --auto --left-of HDMI-0 --panning 1366x768+0+312\n        xrandr --output HDMI-0 --auto --primary --output LVDS-0 --auto --left-of HDMI-0 --panning 1366x768+0+312\n    #    xrandr --output HDMI-0 --auto --primary --output LVDS-0 --auto --right-of HDMI-0\n    elif [[ $connected =~ \"VGA-0\" ]]; then\n    #    xrandr --nograb --output VGA-0 --auto --output LVDS-0 --mode 1024x768 --primary\n        # TODO:  look at --scale argument\n        xrandr --output VGA-0 --auto --primary --output LVDS-0 --auto --below VGA-0\n    else\n    #    xrandr --nograb --output LVDS-0 --auto --primary --output HDMI-0 --off\n    #    xrandr --output LVDS-0 --auto --primary --output HDMI-0 --off\n        xrandr --output LVDS-0 --auto --primary --output HDMI-0 --off --output VGA-0 --off\n    fi\nelif [[ $connected =~ \"eDP-1\" ]]; then\n    if [[ -f /proc/acpi/button/lid/LID/state ]]; then\n        lid=$(cat /proc/acpi/button/lid/LID/state | awk '{print $2}')\n    else\n        lid=\"open\"\n    fi\n    if [[ \"$WAYLAND_DISPLAY\" == \"\" ]]; then\n        if [[ $connected =~ \"HDMI-1\" ]] && [[ \"$lid\" == \"closed\" ]]; then\n            xrandr --output HDMI-1 --auto --primary --output eDP-1 --off\n            echo \"Xft.dpi: 96\" | xrdb -merge\n        elif [[ $connected =~ \"HDMI-1\" ]]; then\n            xrandr --output HDMI-1 --auto --primary --output eDP-1 --auto --left-of HDMI-1\n        else\n            xrandr --output eDP-1 --auto --primary --output HDMI-1 --off\n            echo \"Xft.dpi: 168\" | xrdb -merge   # scale=1.75\n        fi\n    else\n        if [[ $connected =~ \"HDMI-A-1\" ]] && [[ \"$lid\" == \"closed\" ]]; then\n            swaymsg output HDMI-A-1 enable\n            swaymsg output eDP-1 disable\n        elif [[ $connected =~ \"HDMI-A-1\" ]]; then\n            swaymsg output HDMI-A-1 enable\n            swaymsg output eDP-1 enable\n        else\n            swaymsg output eDP-1 enable\n            swaymsg output HDMI-A-1 disable\n        fi\n    fi\nelse\n    first=$(echo $connected | cut -f1 -d' ')\n    xrandr --output ${first} --auto --primary\nfi\n"
  },
  {
    "path": "maildir-strip-attachments.py",
    "content": "#!/usr/bin/env python3\n\n# Documentation:\n# - https://docs.python.org/3/library/mailbox.html#mailbox.Maildir\n# - https://docs.python.org/3/library/mailbox.html#mailbox.MaildirMessage\n\nimport os\nimport argparse\nimport mailbox\n\nDROP_MIN_SIZE = 256  # KiB\nDROP_CONTENT_TYPES = [\n    \"image/\",\n    \"video/\",\n    \"application/pdf\",\n    \"application/x-extension-pdf\",\n    \"application/zip\",\n    \"application/gzip\",\n    \"application/x-gzip\",\n    \"application/x-xz\",\n    \"application/x-7z-compressed\",\n    \"application/x-zip-compressed\",\n    \"application/x-rar-compressed\",\n    \"application/x-msdownload\",\n    \"application/msword\",\n    \"application/vnd.ms-excel\",\n    \"application/vnd.ms-powerpoint\",\n    \"application/vnd.ms-xpsdocument\",\n    \"application/octet-stream\",\n]\n\ndef process_maildir(maildir):\n    dropped_items = 0\n    dropped_size = 0\n\n    mb = mailbox.Maildir(maildir, create=False)\n    for key, message in mb.iteritems():\n        for part in message.walk():\n            if part.is_multipart():\n                continue\n            size = len(part.as_bytes()) / 1024\n            if size > DROP_MIN_SIZE:\n                print(\"{}\\tsize: {:g} KiB\".format(part.get_content_type(), size))\n                for ct in DROP_CONTENT_TYPES:\n                    if part.get_content_type().startswith(ct):\n                        part.set_payload(\"\")\n                        dropped_items += 1\n                        dropped_size += size\n\n        # update the message on disk\n        mb.update({key: message})\n\n    print(\"Dropped {} attachements ({:g} MiB).\".format(dropped_items, dropped_size / 1024))\n\ndef argtype_dir_path(string):\n    if os.path.isdir(string):\n        return string\n    raise NotADirectoryError(string)\n\ndef argtype_maildir(string):\n    string = argtype_dir_path(string)\n    for sub in [\"cur\", \"new\", \"tmp\"]:\n        subdir = os.path.join(string, sub)\n        if not os.path.isdir(subdir):\n            raise NotADirectoryError(subdir)\n    return string\n\nif __name__ == \"__main__\":\n    ap = argparse.ArgumentParser(description=\"Strip attachments from messages in a maildir.\")\n    ap.add_argument(\"maildir\", metavar=\"PATH\", type=argtype_maildir,\n                    help=\"path to the maildir\")\n\n    args = ap.parse_args()\n    process_maildir(args.maildir)\n"
  },
  {
    "path": "makeissue.sh",
    "content": "echo -e '\\e[H\\e[2J' > issue\necho -e '                                                            \\e[1;30m| \\e[34m\\\\s \\\\r' >> issue\necho -e '       \\e[36;1m/\\\\\\\\                      \\e[37m||     \\e[36m| |                   \\e[30m|' >> issue\necho -e '      \\e[36m/  \\\\\\\\                     \\e[37m||     \\e[36m|     _               \\e[30m| \\e[32m\\\\t' >> issue\necho -e '     \\e[1;36m/ \\e[0;36m.. \\e[1m\\\\\\\\   \\e[37m//==\\\\\\\\\\\\\\\\ ||/= /==\\\\\\\\ ||/=\\\\\\\\  \\e[36m| | |/ \\\\\\\\ |  | \\\\\\\\ /     \\e[30m| \\e[32m\\\\d' >> issue\necho -e '    \\e[0;36m/ .  . \\\\\\\\  \\e[37m||  || ||   |    ||  || \\e[36m| | |  | |  |  X      \\e[1;30m|' >> issue\necho -e '   \\e[0;36m/  .  .  \\\\\\\\ \\e[37m\\\\\\\\\\\\\\\\==/| ||   \\\\\\\\==/ ||  || \\e[36m| | |  | \\\\\\\\_/| / \\\\\\\\     \\e[1;30m| \\e[31m\\\\U' >> issue\necho -e '  \\e[0;36m/ ..    .. \\\\\\\\   \\e[0;37mA simple, lightweight linux distribution.  \\e[1;30m|' >> issue\necho -e ' \\e[0;36m/_\\x27        `_\\\\\\\\                                             \\e[1;30m| \\e[35m\\\\l \\e[0mon \\e[1;33m\\\\n' >> issue\necho -e ' \\e[0m' >> issue\necho -e '' >> issue\n"
  },
  {
    "path": "mp3convert.py",
    "content": "#! /usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor\nimport re\nimport shutil\nimport subprocess\nimport shlex\n\nfrom pythonscripts.cpu import cores_count\nfrom pythonscripts.tempfiles import TempFiles\nfrom pythonscripts.ffparser import FFprobeParser\n\n\naudio_types = (\"mp3\", \"aac\", \"ac3\", \"mp2\", \"wma\", \"wav\", \"mka\", \"m4a\", \"ogg\", \"oga\", \"flac\")\naudio_file_regex = re.compile(\"^(?P<dirname>/(.*/)*)(?P<filename>.*(?P<extension>\\.(\" + \"|\".join(audio_types) + \")))$\")\nffmpeg_command = \"ffmpeg -i {input} -acodec libmp3lame -ar 44100 -ab {bitrate:d}k -ac 2 -f mp3 -map_metadata 0 -y {output}\"\n\n\nclass GettingBitrateError(Exception):\n    def __init__(self, fname):\n        self.message = \"Couldn't get bitrate from file \" + fname\n\n\nclass ConversionError(Exception):\n    def __init__(self, fname, status, output):\n        self.message = \"Error while converting file \" + fname + \"\\nffmpeg exited with status \" + str(status) + \"\\n\" + output\n\n\ndef get_bitrate(filename):\n    parser = FFprobeParser(filename)\n    bitrate = parser.get(\"audio\", \"bit_rate\")\n    del parser\n    if bitrate is None:\n        raise GettingBitrateError(filename)\n    else:\n        return bitrate // 1000\n\n\ndef convert(filename, output_extension, bitrate, delete_after=False):\n    tmpfile = tmp.getTempFileName()\n    command = ffmpeg_command.format(input=shlex.quote(filename), bitrate=bitrate, output=shlex.quote(tmpfile))\n    try:\n        subprocess.run(command, shell=True, check=True, capture_output=True)\n        if delete_after:\n            os.remove(filename)\n        shutil.move(tmpfile, os.path.splitext(filename)[0] + output_extension)\n        tmp.remove(tmpfile)\n    except subprocess.CalledProcessError as e:\n        tmp.remove(tmpfile)\n        raise ConversionError(filename, e.returncode, e.output)\n\n\nclass Main():\n    def __init__(self, args):\n        self.countAudioFiles = 0\n        self.countHigherBitrate = 0\n        self.countDifferentFormat = 0\n        self.countErrors = 0\n        self.countNonAudioFiles = 0\n\n        self.dry_run = args.dry_run\n        self.bitrate = args.bitrate\n        self.verbose = args.verbose\n        self.recursive = args.recursive\n        self.deleteAfter = args.delete_after\n        self.outputExtension = \".\" + args.output_extension\n        self.paths = args.path\n\n    def print_stats(self):\n        print()\n        print(\"-----------collected statistics-----------\")\n        print(\"All audio files (without errors):   % 6d\" % self.countAudioFiles)\n        print(\"Converted files:                    % 6d\" % (self.countDifferentFormat + self.countHigherBitrate))\n        print(\"    - different format:             % 6d\" % self.countDifferentFormat)\n        print(\"    - %3s but higher bitrate:       % 6d\" % (self.outputExtension[1:], self.countHigherBitrate))\n        print(\"Errors:                             % 6d\" % self.countErrors)\n        print(\"Non-audio files:                    % 6d\" % self.countNonAudioFiles)\n        print(\"------------------------------------------\")\n\n    def check(self, path):\n        match = re.match(audio_file_regex, path)\n\n        if not match:\n            self.countNonAudioFiles += 1\n            return False\n\n        filename = match.group(\"filename\")\n        ext = match.group(\"extension\")\n\n        self.countAudioFiles += 1\n        if ext != 
self.outputExtension:\n            self.countDifferentFormat += 1\n            return True\n\n        bitrate = get_bitrate(path)\n        if self.verbose > 0:\n            sys.stdout.write(\"% 3s kb/s: %s\\n\" % (bitrate, filename))\n        if bitrate > self.bitrate:\n            self.countHigherBitrate += 1\n            return True\n        return False\n\n    async def run(self):\n        # We could use the default single-threaded executor with basically the same performance\n        # (because of Python's GIL), but the ThreadPoolExecutor allows us to limit the maximum number\n        # of workers and thus the maximum number of concurrent subprocesses.\n        with ThreadPoolExecutor(max_workers=cores_count()) as executor:\n            loop = asyncio.get_running_loop()\n            tasks = [\n                loop.run_in_executor(executor, self.worker, path)\n                for path in self.queue_generator()\n            ]\n            await asyncio.gather(*tasks)\n\n        self.print_stats()\n\n    def worker(self, path):\n        path = os.path.abspath(path)\n\n        try:\n            # check bitrate/filetype etc., skip if conversion not necessary\n            if not self.check(path) or self.dry_run:\n                return\n            print(\"Converting: {}\".format(path))\n            convert(path, self.outputExtension, self.bitrate, self.deleteAfter)\n        except ConversionError as e:\n            msg = \"ERROR: failed to convert file '{}'\".format(path)\n            if self.verbose > 0:\n                msg += \"\\n\" + e.message\n            print(msg, file=sys.stderr)\n            self.countErrors += 1\n        except GettingBitrateError as e:\n            msg = \"ERROR: failed to get bitrate from file '{}'\".format(path)\n            if self.verbose > 0:\n                msg += \"\\n\" + e.message\n            print(msg, file=sys.stderr)\n            self.countErrors += 1\n        else:\n            print(\"Done: {}\".format(path))\n\n    def queue_generator(self):\n        \"\"\" For each directory in self.paths yields full paths to audio files in that folder.\n            If self.paths contains a file path instead of a directory, it is yielded as-is.\n        \"\"\"\n\n        def walk(root):\n            dirs = []\n            files = []\n            for entry in os.scandir(root):\n                if entry.is_dir():\n                    dirs.append(entry.name)\n                elif entry.is_file():\n                    files.append(entry.name)\n\n            # first yield found files, then recurse into subdirs\n            for f in files:\n                yield os.path.join(root, f)\n            if self.recursive:\n                for d in dirs:  # recurse into subdir\n                    for f in walk(os.path.join(root, d)):\n                        yield f\n\n        for path in self.paths:\n            if os.path.isdir(path):\n                for f in walk(path):\n                    yield f\n            else:\n                yield path\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"convert all audio files in given folder (recursively) to specified bitrate, skip if bitrate is less or equal\")\n    parser.add_argument(\"path\", action=\"store\", nargs=\"+\", help=\"path to file(s) to convert - filename or directory\")\n    parser.add_argument(\"-r\", \"--recursive\", action=\"store_true\", help=\"browse folders recursively\")\n    parser.add_argument(\"--dry-run\", 
action=\"store_true\", help=\"don't convert, only print stats\")\n    parser.add_argument(\"-b\", \"--bitrate\", action=\"store\", type=int, metavar=\"BITRATE\", default=\"128\", help=\"set bitrate - in kb/s, default=128\")\n    parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0, help=\"set verbosity level\")\n    parser.add_argument(\"--delete-after\", action=\"store_true\", help=\"delete old files after conversion\")\n    parser.add_argument(\"--output-extension\", choices=audio_types, type=str, default=\"mp3\", help=\"set output extension\")\n\n    args = parser.parse_args()\n\n    tmp = TempFiles()\n    main = Main(args)\n    asyncio.run(main.run())\n"
  },
  {
    "path": "nat-launch-subnet.sh",
    "content": "#!/bin/bash\n\n\nfunction print_launch_subnet_usage()\n{\n  echo \"USAGE\"\n  echo \"  $0 <up|down>\"\n  cat <<'CONFIG'\n\nREQUIRED VARIABLES\n  # The network interface card (NIC) that is connected to the internet or other\n  # wide area network.\n  wan_nic=\"wlan0\"\n\n  # The network interface card connected to the subnet.\n  subnet_nic=\"eth0\"\n\n  # The subnet IP mask.\n  mask=/24\n\n  # The subnet IP range.\n  subnet_ip=10.0.0.0$mask\n\n  # The IP of the subnet NIC on the subnet.\n  server_ip=10.0.0.100$mask\n\n  # The IP tables binary to use.\n  iptables=/usr/bin/idemptables\n\n  # The dnsmasq arguments - PID and lease files to use.\n  dnsmasq_pid=/tmp/dhcpd.pid\n  dnsmasq_lease=/tmp/dhcpd.lease\n\n  # The port of DNS service, see dnsmasq(8) for details. Specify \"0\" to disable DNS server.\n  dnsmasq_port=53\n\n  # The DHCP range, see dnsmasq(8) for details.\n  dnsmasq_dhcp_range=\"192.168.1.100,192.168.1.200,12h\"\n\nOPTIONAL VARIABLES\n  # Function or external scripts to run before before and after bringing the\n  # subnet NIC up or down: pre_up, post_up, pre_down, post_down\n\n  # pre_up as a function:\n  # function pre_up()\n  # {\n  # }\n\n  # pre_up as a script:\n  # pre_up=/path/to/script\n\n  # ip_forward=0\n  # The value of /proc/sys/net/ipv4/ip_forward to restore when shutting down\n  # the subnet.\nCONFIG\n}\n\nfunction launch_subnet()\n{\n  set -e\n\n  if [[ -z $1 ]]\n  then\n    print_launch_subnet_usage\n    exit 1\n  else\n    action=\"$1\"\n  fi\n\n  if [[ -z $wan_nic ]]\n  then\n    echo \"wan_nic is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $subnet_nic ]]\n  then\n    echo \"subnet_nic is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $mask ]]\n  then\n    echo \"mask is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $subnet_ip ]]\n  then\n    echo \"subnet_ip is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $server_ip ]]\n  then\n    echo \"server_ip is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $iptables ]]\n  then\n    echo \"iptables is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $dnsmasq_pid ]]\n  then\n    echo \"dnsmasq_pid is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $dnsmasq_lease ]]\n  then\n    echo \"dnsmasq_lease is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $dnsmasq_port ]]\n  then\n    echo \"dnsmasq_port is undefined\"\n    exit 1\n  fi\n\n  if [[ -z $dnsmasq_dhcp_range ]]\n  then\n    echo \"dnsmasq_dhcp_range is undefined\"\n    exit 1\n  fi\n\n\n  case \"$action\" in\n    up)\n\n      # Enable IP forwarding.\n      echo 1 > /proc/sys/net/ipv4/ip_forward\n\n      ## iptables rules are changed to fit my firewall config\n      ## see http://xyne.archlinux.ca/notes/network/dhcp_with_dns.html for original rules\n\n      # Open up DNS (53) and DHCP (67) ports on subnet_nic.\n      \"$iptables\" -A nat-subnet -i \"$subnet_nic\" -s \"$subnet_ip\" -p tcp --dport 53 -j ACCEPT\n      \"$iptables\" -A nat-subnet -i \"$subnet_nic\" -s \"$subnet_ip\" -p udp --dport 53 -j ACCEPT\n      \"$iptables\" -A nat-subnet -i \"$subnet_nic\" -p udp --dport 67 -j ACCEPT\n\n      # Reply to ICMP (ping) packets so clients can check their connections.\n      \"$iptables\" -A nat-subnet -i \"$subnet_nic\" -p icmp --icmp-type echo-request -j ACCEPT\n      #\"$iptables\" -A OUTPUT -i \"$subnet_nic\" -p icmp --icmp-type echo-reply -j ACCEPT\n\n      # Allow postrouting to wan_nic (for e.g. 
internet access on the subnet).\n      \"$iptables\" -t nat -A POSTROUTING -s \"$subnet_ip\" -o \"$wan_nic\" -j MASQUERADE\n\n      # Enable forwarding from subnet_nic to wan_nic (and back via related and established connections).\n      \"$iptables\" -A FORWARD -i \"$subnet_nic\" -s \"$subnet_ip\" -o \"$wan_nic\" -j ACCEPT\n      \"$iptables\" -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n\n      # Bring down subnet_nic, configure it and bring it up again.\n      if [[ ! -z $pre_up ]]\n      then\n        ip link set dev \"$subnet_nic\" down\n        \"$pre_up\"\n      fi\n      ip link set dev \"$subnet_nic\" up\n      if [[ ! -z $post_up ]]\n      then\n        \"$post_up\"\n      fi\n\n      # Set the static IP for subnet_nic.\n      ip addr add \"$server_ip\" dev \"$subnet_nic\"\n\n      # Ensure the lease file exists.\n      mkdir -p -- \"${dnsmasq_lease%/*}\"\n      [[ -f $dnsmasq_lease ]] || touch \"$dnsmasq_lease\"\n\n      # Launch the DHCP server\n      dnsmasq \\\n          --pid-file=\"$dnsmasq_pid\" \\\n          --dhcp-leasefile=\"$dnsmasq_lease\" \\\n          --port=\"$dnsmasq_port\" \\\n          --interface=\"$subnet_nic\" \\\n          --except-interface=lo \\\n          --bind-interfaces \\\n          --dhcp-range=\"$dnsmasq_dhcp_range\" \\\n          --dhcp-authoritative \\\n          --dhcp-option=6,\"${server_ip%/*}\"\n    ;;\n\n    down)\n      # Kill the DHCP server.\n      if [[ -f $dnsmasq_pid ]]\n      then\n        kill $(cat \"$dnsmasq_pid\") && rm \"$dnsmasq_pid\" && echo \"killed server\"\n      fi\n\n      if [[ ! -z $pre_down ]]\n      then\n        \"$pre_down\"\n      fi\n      ip addr delete \"$server_ip\" dev \"$subnet_nic\"\n      ip link set dev \"$subnet_nic\" down\n      if [[ ! -z $post_down ]]\n      then\n        \"$post_down\"\n      fi\n\n      # Undo all of the changes above in reverse order.\n      \"$iptables\" -D FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n      \"$iptables\" -D FORWARD -i \"$subnet_nic\" -s \"$subnet_ip\" -o \"$wan_nic\" -j ACCEPT\n      \"$iptables\" -t nat -D POSTROUTING -s \"$subnet_ip\" -o \"$wan_nic\" -j MASQUERADE\n      #\"$iptables\" -D OUTPUT -i \"$subnet_nic\" -p icmp --icmp-type echo-reply -j ACCEPT\n      \"$iptables\" -D nat-subnet -i \"$subnet_nic\" -p icmp --icmp-type echo-request -j ACCEPT\n      \"$iptables\" -D nat-subnet -i \"$subnet_nic\" -p udp --dport 67 -j ACCEPT\n      \"$iptables\" -D nat-subnet -i \"$subnet_nic\" -s \"$subnet_ip\" -p udp --dport 53 -j ACCEPT\n      \"$iptables\" -D nat-subnet -i \"$subnet_nic\" -s \"$subnet_ip\" -p tcp --dport 53 -j ACCEPT\n\n\n      if [[ ! -z $ip_forward ]]\n      then\n        if [[ $ip_forward != $(cat /proc/sys/net/ipv4/ip_forward) ]]\n        then\n          echo $ip_forward > /proc/sys/net/ipv4/ip_forward\n        fi\n      else\n        echo 0 > /proc/sys/net/ipv4/ip_forward\n      fi\n    ;;\n\n    *)\n      print_launch_subnet_usage\n      exit 1\n    ;;\n  esac\n}\n"
  },
  {
    "path": "nat-launch.sh",
    "content": "#!/bin/bash\n\n# Original author: Xyne\n# http://xyne.archlinux.ca/notes/network/dhcp_with_dns.html\n\nfunction print_usage() {\n    echo \"usage: $0 <WAN interface> <subnet interface> <up|down>\"\n}\n\nif [[ $EUID -ne 0 ]]; then\n    echo \"This script must be run as root.\" >&2\n    exit 1\nfi\n\nif [[ -z $3 ]]; then\n    print_usage\n    exit 1\nelse\n    wan_nic=\"$1\"\n    subnet_nic=\"$2\"\n    action=\"$3\"\nfi\n\n\nmask=/24\nsubnet_ip=192.168.1.0$mask\nserver_ip=192.168.1.23$mask\niptables=/usr/bin/idemptables\ndnsmasq_pid=/run/dnsmasq_$subnet_nic.pid\ndnsmasq_lease=/run/dnsmasq_$subnet_nic.lease\ndnsmasq_port=0\ndnsmasq_dhcp_range=\"192.168.1.100,192.168.1.150,6h\"\n\nsource nat-launch-subnet.sh\n\nlaunch_subnet \"$action\"\n"
  },
  {
    "path": "notify-brightness.sh",
    "content": "#! /bin/bash\n\n# LCD brightness notification (level changed by ACPI, no action required)\n\n# duration in ms\nduration=1500\n\n# get brightness level, set title\nlevel=$(cat \"/sys/class/backlight/intel_backlight/brightness\")\nmax=$(cat \"/sys/class/backlight/intel_backlight/max_brightness\")\npercent=$(( $level * 100 / $max ))\ntitle=\"LCD brightness\"\n\n# create fancy bar\nf=$((percent/10))\ne=$((10-f))\nfchars='◼◼◼◼◼◼◼◼◼◼'\nechars='◻◻◻◻◻◻◻◻◻◻'\nbar=\"${fchars:0:f}${echars:0:e} $percent%\"\n\nnotify-send --app-name=VolumeNotification --expire-time=\"$duration\" --urgency=low --transient \"$title\" \"$bar\"\n"
  },
  {
    "path": "notify-volume.sh",
    "content": "#!/bin/bash\n\n# volume control (up/down/mute/unmute/toggle) + notification\n\n# duration in ms\nduration=1500\n\nnotify () {\n    # get volume level\n    percent=$(pactl get-sink-volume @DEFAULT_SINK@ | grep -Po '\\d+(?=%)' | head -n 1)\n\n    # check if muted, set title\n    if [[ $(pactl get-sink-mute @DEFAULT_SINK@) == \"Mute: yes\" ]]; then\n        title=\"Volume muted\"\n    else\n        title=\"Volume\"\n    fi\n\n    # create fancy bar\n    f=$((percent/10))\n    e=$((10-f))\n    fchars='◼◼◼◼◼◼◼◼◼◼'\n    echars='◻◻◻◻◻◻◻◻◻◻'\n    bar=\"${fchars:0:f}${echars:0:e} $percent%\"\n\n    notify-send --app-name=VolumeNotification --category=device --expire-time=\"$duration\" --urgency=low --transient \"$title\" \"$bar\"\n}\n\n# redirect stdout of this script to /dev/null\nexec > /dev/null\n\ncase \"$1\" in\n    up)\n        pactl set-sink-volume @DEFAULT_SINK@ +5%\n        pactl set-sink-mute @DEFAULT_SINK@ 0\n        ;;\n    down)\n        pactl set-sink-volume @DEFAULT_SINK@ -5%\n        pactl set-sink-mute @DEFAULT_SINK@ 0\n        ;;\n    mute)\n        pactl set-sink-mute @DEFAULT_SINK@ 1\n        ;;\n    unmute)\n        pactl set-sink-mute @DEFAULT_SINK@ 0\n        ;;\n    toggle)\n        pactl set-sink-mute @DEFAULT_SINK@ toggle\n        ;;\nesac\n\nnotify\n"
  },
  {
    "path": "pacman-disowned.sh",
    "content": "#!/bin/sh\n\ntmp=${TMPDIR-/tmp}/pacman-disowned-$UID-$$\ndb=$tmp/db\nfs=$tmp/fs\n\nmkdir \"$tmp\"\ntrap 'rm -rf \"$tmp\"' EXIT\n\npacman -Qlq | sort -u > \"$db\"\n\nfind /etc /opt /usr ! -name lost+found \\( -type d -printf '%p/\\n' -o -print \\) | sort > \"$fs\"\n\ncomm -23 \"$fs\" \"$db\"\n"
  },
  {
    "path": "pdf-extract.sh",
    "content": "#!/bin/bash\n\n# exit on error\nset -e\n\nany2img() {\n    convert -density 150 \"$1\" -quality 100 \"$2\" &>/dev/null\n}\n\npdf2img() {\n    echo \"Splitting single pdf file by pages (tiff)\"\n    stapler burst \"$1\"\n    base=${1%.*}\n    for i in \"${base}_\"*.pdf\n    do\n        out=pg${i#\"$base\"}  # will result in 'pg_123.pdf'\n        out=${out%.*}.tiff  # replace extension\n        echo \"$out\"\n#        any2img \"$i\" \"$out\"\n        convert -density 300 \"$i\" -compress lzw \"$out\"\n        rm -f \"$i\"\n    done\n}\n\ndjvu2img() {\n    echo \"Splitting single djvu file by pages (tiff)\"\n    pages=`djvused -e \"n\" \"$1\"`\n    for (( i=1; i<=$pages; i++ ))\n    do\n        num=$(printf \"%03d\" \"$i\")\n        out=\"pg_$num.tiff\"\n        echo \"  $out\"\n        ddjvu -page=$i -format=tiff \"$1\" \"$out\"\n    done\n}\n\npath=$(realpath \"$1\")\nfilename=$(basename \"$path\")\nextension=${filename##*.}\nbasename=${filename%.*} # filename without extension\n\n# create directory for extracted images\nmkdir -p \"$basename\"\ncp \"$path\" \"$basename\"\ncd \"$basename\"\n\nif [[ \"$extension\" == \"pdf\" ]]; then\n    pdf2img \"$filename\"\n    rm -f \"$filename\"\nelif [[ \"$extension\" == \"djvu\" ]]; then\n    djvu2img \"$filename\"\n    rm -f \"$filename\"\nelse\n    echo \"Supported file types: pdf, djvu\"\n    exit 1\nfi\n"
  },
  {
    "path": "perm.sh",
    "content": "#!/bin/bash\n\nopt=${1:-'-h'}\ndir=${2:-'.'}\n\nfmode=0644\ndmode=0755\n\ncase \"$1\" in\n    -a) # dirs and files\n        find \"$2\" -type d -exec chmod $dmode \"{}\" +\n        find \"$2\" -type f -exec chmod $fmode \"{}\" +\n        ;;\n    -d)\n        find \"$2\" -type d -exec chmod $dmode \"{}\" +\n        ;;\n    -f) \n        find \"$2\" -type f -exec chmod $fmode \"{}\" +\n        ;;\n    *)\n        printf \"Usage: $(basename $0) option [directory]\n  -a \\t set permissions of files and directories to $fmode, resp. $dmode.\n  -d \\t set permissions of directories to $dmode.\n  -f \\t set permissions of files to $fmode.\n  -h \\t print this help.\n\"\n        ;;\nesac\n"
  },
  {
    "path": "pythonscripts/__init__.py",
    "content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\n# hack - enable importing from _this_ directory\nsys.path.append(os.path.dirname(__file__))\n\nfrom misc import *\nfrom tempfiles import *\nfrom terminal import *\n"
  },
  {
    "path": "pythonscripts/cpu.py",
    "content": "#! /usr/bin/env python3\n\ndef cores_count():\n    f = open(\"/proc/cpuinfo\")\n    for line in f.readlines():\n        if line.startswith(\"cpu cores\"):\n            try:\n                _, n = line.split(\":\")\n                return int(n.strip())\n            except ValueError:\n                continue\n    return 1\n"
  },
  {
    "path": "pythonscripts/daemon.py",
    "content": "#! /usr/bin/env python\n\nimport os\n\ndef spawnDaemon(*args, detach_fds=True):\n    \"\"\"Spawn a completely detached subprocess (i.e., a daemon).\n\n    E.g. for mark:\n    spawnDaemon(\"../bin/producenotify.py\", \"producenotify.py\", \"xx\")\n    \"\"\"\n    if len(args) == 0:\n        raise ValueError(\"no arguments supplied\")\n\n    # fork the first time (to make a non-session-leader child process)\n    try:\n        pid = os.fork()\n    except OSError as e:\n        raise RuntimeError(\"1st fork failed: %s [%d]\" % (e.strerror, e.errno))\n    if pid != 0:\n        # parent (calling) process is all done\n        return\n\n    # detach from controlling terminal (to make child a session-leader)\n    os.setsid()\n    try:\n        pid = os.fork()\n    except OSError as e:\n        raise RuntimeError(\"2nd fork failed: %s [%d]\" % (e.strerror, e.errno))\n        raise Exception(\"%s [%d]\" % (e.strerror, e.errno))\n    if pid != 0:\n        # child process is all done\n        os._exit(0)\n\n    if detach_fds:\n        # grandchild process now non-session-leader, detached from parent\n        # grandchild process must now close all open files\n        try:\n            maxfd = os.sysconf(\"SC_OPEN_MAX\")\n        except (AttributeError, ValueError):\n            maxfd = 1024\n\n        for fd in range(maxfd):\n            try:\n                os.close(fd)\n            except OSError: # ERROR, fd wasn't open to begin with (ignored)\n                pass\n\n        # redirect stdin, stdout and stderr to /dev/null\n        os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)\n        os.dup2(0, 1)\n        os.dup2(0, 2)\n\n    # and finally let's execute the executable for the daemon!\n    try:\n        os.execvp(args[0], args)\n    except Exception as e:\n        # oops, we're cut off from the world, let's just give up\n        os._exit(255)\n"
  },
  {
    "path": "pythonscripts/ffparser.py",
    "content": "#!/usr/bin/env python\n\nimport json\nimport subprocess\nimport shlex\nfrom pprint import pprint\n\n\nffprobe = \"ffprobe -v quiet -print_format json -show_format -show_streams \"\n\n\nclass FFprobeParser:\n    def __init__(self, path):\n        self.data = json.loads(subprocess.check_output(ffprobe + shlex.quote(path), shell=True, universal_newlines=True))\n\n        self.format = self.data[\"format\"]\n        self.audio = None\n        self.video = None\n        for stream in self.data[\"streams\"]:\n            if self.audio is None and stream[\"codec_type\"] == \"audio\":\n                self.audio = stream\n            if self.video is None and stream[\"codec_type\"] == \"video\":\n                self.video = stream\n\n    def _get(self, option, attribute):\n        return getattr(self, option)[attribute]\n\n    def _getBitrate(self, option):\n        if option == \"audio\":\n            try:\n                return int(self._get(\"audio\", \"bit_rate\"))\n            except:\n                return int(self._getBitrate(\"format\")) - int(self._getBitrate(\"video\"))\n        elif option == \"video\":\n            try:\n                return int(self._get(\"video\", \"bit_rate\"))\n            except:\n                return int(self._getBitrate(\"format\")) - int(self._getBitrate(\"audio\"))\n        elif option == \"format\":\n            try:\n                return int(self._get(\"format\", \"bit_rate\"))\n            except:\n                return None\n\n    def get(self, option, attribute):\n        \"\"\" 'option' is one of \"audio\", \"video\", \"format\"\n            'attribute' is the json attribute to query\n        \"\"\"\n        if attribute == \"bit_rate\":\n            return self._getBitrate(option)\n        else:\n            try:\n                return self._get(option, attribute)\n            except:\n                return None\n\n    def pprint(self, option):\n        \"\"\" 'option' is one of \"audio\", \"video\", \"format\",\n            otherwise 'self.data' is printed\n        \"\"\"\n        pprint(getattr(self, option, self.data))\n\n"
  },
  {
    "path": "pythonscripts/logger.py",
    "content": "#! /usr/bin/env python\n\n\"\"\"\nSimple logger object. Log level is integer for easy comparison.\n\"\"\"\n\nimport sys\n\nclass Logger:\n   def __init__(self, log_level, prog_name):\n       self.log_level = log_level\n       self.prog_name = prog_name\n       self.filename = None\n\n   def prefix(self, msg):\n       if self.filename is None:\n           return msg\n       return \"%s: %s\" % (self.filename, msg)\n\n   def debug(self, msg):\n       if self.log_level >= 4:\n           print(self.prefix(msg))\n\n   def info(self, msg):\n       if self.log_level >= 3:\n           print(self.prefix(msg))\n\n   def warning(self, msg):\n       if self.log_level >= 2:\n           print(self.prefix(\"WARNING: %s\" % msg))\n\n   def error(self, msg):\n       if self.log_level >= 1:\n           sys.stderr.write(\"%s: %s\\n\" % (self.prog_name, msg))\n\n   def critical(self, msg, retval=1):\n       self.error(msg)\n       sys.exit(retval)\n"
  },
  {
    "path": "pythonscripts/misc.py",
    "content": "#! /usr/bin/env python\n\n\"\"\"\nHuman-readable file size. Algorithm does not use a for-loop. It has constant\ncomplexity, O(1), and is in theory more efficient than algorithms using a for-loop.\n\nOriginal source code from:\nhttp://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size\n\"\"\"\n\nfrom math import log\n\nunit_list = {\n    \"long\": list(zip(['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'], [0, 0, 1, 2, 2, 2])),\n    \"short\": list(zip(['B', 'K', 'M', 'G', 'T', 'P'], [0, 0, 1, 2, 2, 2])),\n}\n\ndef format_sizeof(num, unit_format=\"long\"):\n    if num > 1:\n        exponent = min(int(log(num, 1024)), len(unit_list[unit_format]) - 1)\n        quotient = float(num) / 1024**exponent\n        unit, num_decimals = unit_list[unit_format][exponent]\n        format_string = '{:.%sf} {}' % (num_decimals)\n        return format_string.format(quotient, unit)\n    else:\n        return str(int(num)) + \" B\"\n\n\n\n\"\"\"\nNice time format, useful for ETA etc. Output is never longer than 6 characters.\n\"\"\"\n\ndef format_time(seconds):\n    w, s = divmod(seconds, 3600*24*7)\n    d, s = divmod(s, 3600*24)\n    h, s = divmod(s, 3600)\n    m, s = divmod(s, 60)\n    if w > 0:\n        return \"%dw\" % w\n    if d > 0:\n        return \"%dd%02dh\" % (d, h)\n    if h > 0:\n        return \"%02dh%02dm\" % (h, m)\n    if m > 0:\n        return \"%02dm%02ds\" % (m, s)\n    return str(s)\n\n\n\n\"\"\"\nGet content of any readable text file.\n\"\"\"\n\ndef cat(fname):\n    try:\n        f = open(fname, \"r\")\n        s = f.read()\n        f.close()\n        return s.strip()\n    except:\n        return None\n\n\n\n\"\"\"\nReturns a string of at most `max_length` characters, cutting\nonly at word-boundaries. If the string was truncated, `suffix`\nwill be appended.\n\"\"\"\n\nimport re\n\ndef smart_truncate(text, max_length=100, suffix='...'):\n    if len(text) > max_length:\n        pattern = r'^(.{0,%d}\\S)\\s.*' % (max_length-len(suffix)-1)\n        return re.sub(pattern, r'\\1' + suffix, text)\n    else:\n        return text\n\n\n\n\"\"\"\nRecursive directory creation function (like 'mkdir -p' in linux).\n\"\"\"\n\nimport os\n\ndef mkdir(path):\n    try:\n        os.makedirs(path)\n    except OSError as e:\n        if e.errno != 17:\n            raise e\n"
  },
  {
    "path": "pythonscripts/tempfiles.py",
    "content": "#! /usr/bin/env python\n\n\"\"\"\nCreate temporary file, close file descriptor and return full path of the file.\n\"\"\"\n\nimport os\nimport tempfile\nimport atexit\n\nclass TempFiles:\n    def __init__(self):\n        self.tempFiles = []\n        atexit.register(self.removeAll)\n\n    def removeAll(self):\n        for file in self.tempFiles[:]:\n            self.remove(file)\n\n    def remove(self, file):\n        if file in self.tempFiles and os.path.exists(file):\n            os.remove(file)\n            self.tempFiles.remove(file)\n\n    def getTempFileName(self, prefix=\"tmp\", suffix=\"\", dir=None, text=False):\n        fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir, text=text)\n        os.close(fd)\n        self.tempFiles.append(path)\n        return path\n\n"
  },
  {
    "path": "pythonscripts/terminal.py",
    "content": "#! /usr/bin/env python\n\n\"\"\"\nLinux terminal colors.\n\"\"\"\n\n#import sys\n\nCOLORS = {\"black\":30, \"red\":31, \"green\":32, \"yellow\":33, \"blue\":34, \"magenta\":35, \"cyan\":36, \"white\":37}\n\ndef colorize(color, text):\n    c = COLORS[color]\n    return \"\\033[1;%im%s\\033[0m\" % (c, text)\n#    if sys.stdout.isatty():\n#        c = COLORS[color]\n#        return \"\\033[1;%im%s\\033[0m\" % (c, text)\n#    else:\n#        return text\n\ndef getColor(status, download_speed=0):\n    if status == \"error\":\n        return \"red\"\n    elif status == \"active\":\n        if download_speed > 0:\n            return \"blue\"\n        else:\n            return \"yellow\"\n    elif status == \"complete\":\n        return \"green\"\n    elif status == \"paused\":\n        return \"cyan\"\n    elif status == \"waiting\":\n        return \"magenta\"\n    else:\n        return \"\"\n\n\n\n\"\"\"\nGet size of unix terminal as tuple (width, height).\nWhen all fails, default value is (80, 25).\n\nOriginal source code from:\nhttp://stackoverflow.com/a/566752\n\"\"\"\n\ndef getTerminalSize():\n    import os\n    env = os.environ\n    def ioctl_GWINSZ(fd):\n        try:\n            import fcntl, termios, struct, os\n            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))\n        except:\n            return None\n        return cr\n    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n    if not cr:\n        try:\n            fd = os.open(os.ctermid(), os.O_RDONLY)\n            cr = ioctl_GWINSZ(fd)\n            os.close(fd)\n        except:\n            pass\n    if not cr:\n        try:\n            cr = (env['LINES'], env['COLUMNS'])\n        except:\n            cr = (25, 80)\n    return int(cr[1]), int(cr[0])\n\n"
  },
  {
    "path": "qemu-launcher.sh",
    "content": "#! /usr/bin/bash\n\n# Author: Jakub Klinkovský (Lahwaacz)\n# https://github.com/lahwaacz\n\nfunction print_usage() {\n    echo \"usage: $0 <VM name>\"\n}\n\n## Generate name of TAP interface to create\nfunction get_tap_name() {\n    for (( i=0; i<$tap_limit; i++ )); do\n        local name=\"tap$i\"\n        if [[ ! -d \"/sys/class/net/$name\" ]]; then\n            echo \"$name\"\n            break\n        fi\n    done\n}\n\n# do not run as root\nif [[ $EUID -eq 0 ]]; then\n    echo \"This script is not supposed to be run as root.\" >&2\n    exit 1\nfi\n\n# parse command line arguments\nif [[ -z $1 ]]; then\n    print_usage\n    exit 1\nelse\n    vm_name=\"$1\"\nfi\n\n\nsudo_args=(\"-Ap\" \"Enter your root password (QEMU launcher script)\")\nusername=$(whoami)\ntap_limit=10            # maximum number of TAP interfaces created by this script\ntap_nic=$(get_tap_name)\nbr_nic=\"br0-qemu\"       # bridge interface name (will be created)\nwan_nic=\"wlan0\"         # WAN interface name (for NAT)\n\n\ncase \"$vm_name\" in\n    btrfs)\n        sudo \"${sudo_args[@]}\" qemu-tap-helper.sh \"$username\" \"$tap_nic\" \"$br_nic\" \"$wan_nic\" up\n\n        qemu-system-x86_64 \\\n            -name \"$vm_name\" \\\n            -monitor stdio \\\n            -enable-kvm -smp 2 -cpu host -m 1024 \\\n            -vga qxl -spice port=5931,disable-ticketing \\\n            -drive file=\"/home/lahwaacz/virtual_machines/archlinux-btrfs.raw\",if=virtio,cache=none -boot once=c \\\n            -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py \"$vm_name\") -net tap,ifname=\"$tap_nic\",script=no,downscript=no,vhost=on \\\n            -usbdevice tablet\n\n        sudo \"${sudo_args[@]}\" qemu-tap-helper.sh \"$username\" \"$tap_nic\" \"$br_nic\" \"$wan_nic\" down\n    ;;\n    virtarch)\n        sudo \"${sudo_args[@]}\" qemu-tap-helper.sh \"$username\" \"$tap_nic\" \"$br_nic\" \"$wan_nic\" up\n\n        qemu-system-x86_64 \\\n            -name \"$vm_name\" \\\n            -monitor stdio \\\n            -enable-kvm -smp 2 -cpu host -m 1024 \\\n            -vga qxl -spice port=5931,disable-ticketing \\\n            -drive file=\"/home/lahwaacz/virtual_machines/archlinux.raw\",if=virtio,cache=none -boot once=c \\\n            -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py \"$vm_name\") -net tap,ifname=\"$tap_nic\",script=no,downscript=no,vhost=on \\\n            -usbdevice tablet\n\n        sudo \"${sudo_args[@]}\" qemu-tap-helper.sh \"$username\" \"$tap_nic\" \"$br_nic\" \"$wan_nic\" down\n    ;;\n    winxp)\n        sudo \"${sudo_args[@]}\" qemu-tap-helper.sh \"$username\" \"$tap_nic\" \"$br_nic\" \"$wan_nic\" up\n\n        qemu-system-i386 \\\n            -name \"$vm_name\" \\\n            -monitor stdio \\\n            -enable-kvm -smp 2 -cpu host -m 1024 \\\n            -vga qxl -spice port=5930,disable-ticketing \\\n            -drive file=\"/home/lahwaacz/virtual_machines/winxp.raw\",if=virtio,cache=none -boot order=c \\\n            -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py \"$vm_name\") -net tap,ifname=\"$tap_nic\",script=no,downscript=no,vhost=on \\\n            -usbdevice tablet \\\n            -soundhw ac97 \\\n            -localtime\n\n        sudo \"${sudo_args[@]}\" qemu-tap-helper.sh \"$username\" \"$tap_nic\" \"$br_nic\" \"$wan_nic\" down\n    ;;\n    liveiso)\n        if [[ -z \"$2\" ]]; then\n            echo \"You must specify the ISO file as a second argument.\" >&2\n            exit 1\n        fi\n\n        qemu-system-x86_64 \\\n            -name \"$vm_name\" \\\n     
       -monitor stdio \\\n            -enable-kvm -smp 2 -cpu host -m 1024 \\\n            -vga virtio \\\n            -display gtk,gl=on \\\n            -drive file=\"$2\",if=virtio,media=cdrom -boot once=d \\\n            -net nic -net user \\\n            -usbdevice tablet\n    ;;\n    liveiso-efi)\n        if [[ -z \"$2\" ]]; then\n            echo \"You must specify the ISO file as a second argument.\" >&2\n            exit 1\n        fi\n        if [[ ! -e \"/usr/share/ovmf/x64/OVMF_CODE.fd\" ]]; then\n            echo \"File /usr/share/ovmf/x64/OVMF_CODE.fd does not exist. Is the package ovmf installed?\" >&2\n            exit 1\n        fi\n\n        qemu-system-x86_64 \\\n            -bios /usr/share/ovmf/x64/OVMF_CODE.fd \\\n            -name \"$vm_name\" \\\n            -monitor stdio \\\n            -enable-kvm -smp 2 -cpu host -m 1024 \\\n            -vga virtio \\\n            -display gtk,gl=on \\\n            -drive file=\"$2\",if=virtio,media=cdrom -boot once=d \\\n            -net nic -net user \\\n            -usbdevice tablet\n    ;;\n    *)\n        echo \"Unknown VM name specified: $vm_name\" >&2\n        exit 1\n    ;;\nesac\n\n\n### frequently/previously used options:\n\n## user-mode networking\n# -net nic,model=virtio -net user\n\n## user-mode networking with redirect (localhost:2222 -> 10.0.2.15:22)\n# -net nic,model=virtio -net user -redir tcp:2222:10.0.2.15:22\n"
  },
  {
    "path": "qemu-mac-hasher.py",
    "content": "#!/usr/bin/env python\n\n# Author: Jakub Klinkovský (Lahwaacz)\n# https://github.com/lahwaacz\n\nimport sys\nimport zlib\n\nif len(sys.argv) != 2:\n    print(\"usage: %s <VM Name>\" % sys.argv[0])\n    sys.exit(1)\n\ncrc = zlib.crc32(sys.argv[1].encode(\"utf-8\")) & 0xffffffff\ncrc = str(hex(crc))[2:]\nprint(\"52:54:%s%s:%s%s:%s%s:%s%s\" % tuple(crc))\n"
  },
  {
    "path": "qemu-tap-helper.sh",
    "content": "#! /usr/bin/bash\n\n# Author: Jakub Klinkovský (Lahwaacz)\n# https://github.com/lahwaacz\n\n########## Functions ##########\n\n## Check if a string represents a network interface\n# $1: potential interface name\nfunction is_interface() {\n    [[ -d \"/sys/class/net/$1\" ]]\n}\n\n## Create new TAP interface\n# $1: name of the interface to create\nfunction create_tap() {\n    if ! is_interface \"$1\"; then\n        echo \"Creating TAP interface '$1'\"\n        ip tuntap add \"$1\" mode tap user \"$username\"\n        ip link set dev \"$1\" up\n    fi\n}\n\n## Delete TAP interface\n# $1: name of the interface to delete\nfunction del_tap() {\n    echo \"Deleting TAP interface '$1'\"\n    ip link set dev \"$1\" down\n    ip tuntap del \"$1\" mode tap\n}\n\n## Check if the bridge has any interface\n# $1: bridge interface name\nfunction bridge_is_empty() {\n    [[ $(ls \"/sys/class/net/$1/brif\" | wc -w) == \"0\" ]]\n}\n\n## Create bridge interface if it does not exist\n# $1: bridge interface name\nfunction create_br() {\n    if is_interface \"$1\"; then\n        if [[ ! -d \"/sys/class/net/$1/brif\" ]]; then\n            echo \"Interface '$1' already exists and is not a bridge\"\n            exit 1\n        fi\n    else\n        echo \"Creating bridge interface '$1'\"\n        ip link add name \"$1\" type bridge\n        ip link set dev \"$1\" up\n\n        # Xyne's excellent script to launch NAT\n        echo \"Starting NAT\"\n        nat-launch.sh \"$wan_nic\" \"$1\" up\n    fi\n}\n\n## Delete bridge interface if it exists and has no interface\n# $1: bridge interface name\nfunction del_br() {\n    if bridge_is_empty \"$1\"; then\n        # Xyne's excellent script to launch NAT\n        echo \"Stopping NAT\"\n        nat-launch.sh \"$wan_nic\" \"$1\" down\n\n        echo \"Deleting bridge interface '$1'\"\n        ip link set dev \"$1\" down\n        ip link delete \"$1\" type bridge\n    fi\n}\n\n## Add interface to the bridge\n# $1: bridge interface name\n# $2: name of the interface to add\nfunction br_add_iface() {\n    echo \"Adding interface '$2' to bridge '$1'\"\n    ip link set dev \"$2\" promisc on up\n    ip addr flush dev \"$2\" scope host &>/dev/null\n    ip addr flush dev \"$2\" scope site &>/dev/null\n    ip addr flush dev \"$2\" scope global &>/dev/null\n    ip link set dev \"$2\" master \"$1\"\n    # skip forwarding delay\n    bridge link set dev \"$2\" state 3\n}\n\n## Remove interface from the bridge\n# $1: bridge interface name\n# $2: name of the interface to remove\nfunction br_rm_iface() {\n    echo \"Removing interface '$2' from bridge '$1'\"\n    ip link set \"$2\" promisc off down\n    ip link set dev \"$2\" nomaster\n}\n\n########## Main ###############\n\nfunction print_qemu_tap_helper_usage() {\n    echo \"usage: $0 <username> <TAP interface> <bridge interface> <WAN interface> <up|down>\"\n    echo \"  <TAP interface> and <bridge interface> will be created,\"\n    echo \"  NAT from <WAN interface> to <bridge interface> will be set up\"\n}\n\nif [[ $EUID -ne 0 ]]; then\n    echo \"This script must be run as root.\" >&2\n    exit 1\nfi\n\nif [[ -z $4 ]]; then\n    print_qemu_tap_helper_usage\n    exit 1\nelse\n    username=\"$1\"\n    tap_nic=\"$2\"\n    br_nic=\"$3\"\n    wan_nic=\"$4\"\n    action=\"$5\"\nfi\n\n# exit on errors\nset -e\n\ncase \"$action\" in\n    up)\n        create_br \"$br_nic\"\n        create_tap \"$tap_nic\"\n        br_add_iface \"$br_nic\" \"$tap_nic\"\n    ;;\n    down)\n        br_rm_iface \"$br_nic\" \"$tap_nic\"\n        
del_tap \"$tap_nic\"\n        del_br \"$br_nic\"\n    ;;\n    *)\n        print_qemu_tap_helper_usage\n        exit 1\n    ;;\nesac\n"
  },
  {
    "path": "remove-dead-symlinks.sh",
    "content": "#! /bin/bash\n\n# recursively remove dead symlinks\n\nshopt -s globstar\n\n# non-recursive version: 'for itm in *'\nfor itm in **/*\ndo\n    if [ -h \"$itm\" ]\n    then\n        target=$(readlink -fn \"$itm\")\n        if [ ! -e \"$target\" ]\n        then\n            echo \"$itm\"\n            rm \"$itm\"\n        fi\n    fi\ndone\n"
  },
  {
    "path": "replaygain.py",
    "content": "#! /usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport subprocess\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport taglib\n\nfrom pythonscripts.cpu import cores_count\nfrom pythonscripts.logger import Logger\n\nclass ReplayGain:\n    \"\"\" Will consider all files to belong to one album.\n    \"\"\"\n\n    def __init__(self, logger, options, files):\n        # logger\n        self.log = logger\n        self.log.filename = None\n\n        # internals\n        self.raw_lines = []\n        self.data_files = []\n        self.data_album = {}\n\n        # options\n        self.force = options.force\n        self.force_album = options.force_album\n        self.force_track = options.force_track\n        self.files = files\n\n    def run(self):\n        # check if all files have ReplayGain tags; mp3gain runs very long\n        if not (self.force or self.force_album or self.force_track) and self.all_files_have_replaygain_tags():\n            self.log.error(\"All files already have ReplayGain tags, no action taken.\")\n            return\n        if self.run_mp3gain():\n            self.update_tags()\n\n    def all_files_have_replaygain_tags(self):\n        \"\"\" Quick analysis to determine if input files contain replaygain_* tags.\n        \"\"\"\n        for fname in self.files:\n            # open id3 tag\n            f = taglib.File(fname)\n\n            tags = set([tag.lower() for tag in f.tags.keys() if tag.lower().startswith(\"replaygain_\")])\n            return tags == set([\"replaygain_track_gain\", \"replaygain_album_gain\", \"replaygain_track_peak\", \"replaygain_album_peak\"])\n\n    def run_mp3gain(self):\n        \"\"\" Compute values for replaygain_* tags.\n        \"\"\"\n        self.log.debug(\"running mp3gain on specified files\")\n        cmd = [\"mp3gain\", \"-q\", \"-o\", \"-s\", \"s\"] + self.files\n        ret = True\n        try:\n            raw_data = subprocess.check_output(cmd, universal_newlines=True)\n            self.raw_lines = raw_data.splitlines()\n        except subprocess.CalledProcessError as exc:\n            code = exc.returncode\n            msg = \"mp3gain returned error status: \" + str(code) + \"\\n\"\n            msg += \"-----------mp3gain output dump-----------\\n\"\n            msg += exc.output\n            msg += \"\\n-----------------------------------------\\n\"\n            self.log.error(msg)\n            ret = False\n        except Exception as e:\n            print(e)\n            ret = False\n            raise\n        finally:\n            return ret\n\n    def update_tags(self):\n        \"\"\" Add computed replaygain_* tags into all files.\n        \"\"\"\n        self.log.debug(\"parsing mp3gain output\")\n        album_parts = self.raw_lines[-1].strip().split(\"\\t\")\n\n        # just in case\n        if album_parts[0] != '\"Album\"':\n            self.log.error(\"unable to parse mp3gain output\")\n            return\n\n        a_gain = float(album_parts[2])              # album gain\n        a_peak = float(album_parts[3]) / 32768.0    # album peak\n\n        del self.raw_lines[0]   # header\n        del self.raw_lines[-1]  # album summary\n        for line in self.raw_lines:\n            parts = line.strip().split(\"\\t\")\n            fname = parts[0]    # filename\n\n            self.log.filename = fname\n            self.log.debug(\"begin processing file\")\n\n            t_gain = float(parts[2])                # track gain\n            t_peak = float(parts[3]) / 32768.0 
     # track peak\n\n            # set t_gain, t_peak, a_gain, a_peak depending on options\n            if self.force_album:\n                t_gain = a_gain\n                t_peak = a_peak\n            elif self.force_track:\n                a_gain = t_gain\n                a_peak = t_peak\n\n            # open id3 tag\n            f = taglib.File(fname)\n\n            # update tag\n            f.tags[\"REPLAYGAIN_TRACK_GAIN\"] = \"%.2f dB\" % t_gain\n            f.tags[\"REPLAYGAIN_ALBUM_GAIN\"] = \"%.2f dB\" % a_gain\n            f.tags[\"REPLAYGAIN_TRACK_PEAK\"] = \"%.6f\" % t_peak\n            f.tags[\"REPLAYGAIN_ALBUM_PEAK\"] = \"%.6f\" % a_peak\n\n            # save tag\n            self.log.debug(\"saving modified ID3 tag\")\n            f.save()\n\n            self.log.debug(\"done processing file\")\n            self.log.filename = None\n\n\nclass Main:\n    \"\"\" Will parse input pattern and create ReplayGain object on every directory found.\n    \"\"\"\n\n    def __init__(self, logger, options):\n        self.logger = logger\n        self.options = options\n        self.recursive = options.recursive\n        self.paths = options.files\n        del options.recursive   # don't want to pass it to ReplayGain object\n        del options.files   # don't want to pass it to ReplayGain object\n\n    async def run(self):\n        # We could use the default single-threaded executor with basically the same performance\n        # (because of Python's GIL), but the ThreadPoolExecutor allows to limit the maximum number\n        # of workers and thus the maximum number of concurrent subprocesses.\n        with ThreadPoolExecutor(max_workers=cores_count()) as executor:\n            loop = asyncio.get_event_loop()\n            tasks = [\n                loop.run_in_executor(executor, self.worker, path)\n                for path in self.queue_generator()\n            ]\n            for result in await asyncio.gather(*tasks):\n                pass\n\n    def worker(self, paths):\n        paths = sorted(list(paths))\n\n        # skip dirs not containing any mp3 file\n        if len(paths) == 0:\n            return\n\n        # write info\n        print(\"Procesing:\")\n        for path in paths:\n            print(\"  \" + path)\n\n        try:\n            # create ReplayGain object, pass files and run\n            rg = ReplayGain(self.logger, self.options, paths)\n            rg.run()\n        except Exception as e:\n            print(e, file=sys.stderr)\n            raise\n\n    def queue_generator(self):\n        \"\"\" For each directory in self.files returns generator returning full paths to mp3 files in that folder.\n            If self.files contains file paths instead of directory, it's returned as [file].\n        \"\"\"\n\n        def walk(root):\n            dirs = []\n            files = []\n            for entry in os.scandir(root):\n                if entry.is_dir():\n                    dirs.append(entry.name)\n                elif entry.is_file() and entry.name.endswith(\".mp3\"):\n                    files.append(entry.name)\n\n            # first yield found files, then recurse into subdirs\n            if files:\n                yield (os.path.join(root, x) for x in files)\n            if self.recursive:\n                for d in dirs:  # recurse into subdir\n                    for x in walk(os.path.join(root, d)):\n                        yield x\n\n        for path in self.paths:\n            if os.path.isdir(path):\n                for x in walk(path):\n                    
yield x\n            else:\n                yield [path]\n\ndef main(prog_name, options):\n    logger = Logger(options.log_level, prog_name)\n    logger.debug(\"Selected mp3 files:\")\n    logger.debug(\"\\n\".join(sorted(options.files)))\n    main = Main(logger, options)\n    asyncio.run(main.run())\n\ndef argparse_path_handler(path):\n    if not os.path.exists(path):\n        raise argparse.ArgumentTypeError(\"invalid path: '%s'\" % path)\n    if os.path.isfile(path) and not path.endswith(\".mp3\"):\n        raise argparse.ArgumentTypeError(\"not a mp3 file: '%s'\" % path)\n    return os.path.abspath(path)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Write correct ReplayGain tags into mp3 files; uses mp3gain internally\")\n\n    # log level options\n    log = parser.add_mutually_exclusive_group()\n    log.add_argument(\"-q\", \"--quiet\", dest=\"log_level\", action=\"store_const\", const=0, default=1, help=\"do not output error messages\")\n    log.add_argument(\"-v\", \"--verbose\", dest=\"log_level\", action=\"store_const\", const=3, help=\"output warnings and informational messages\")\n    log.add_argument(\"-d\", \"--debug\", dest=\"log_level\", action=\"store_const\", const=4, help=\"output debug messages\")\n\n    parser.add_argument(\"-r\", \"--recursive\", action=\"store_true\", help=\"when path to directory is specified, browse it recursively (albums still respected)\")\n    parser.add_argument(\"--force\", action=\"store_true\", help=\"force overwriting of existing ID3v2 ReplayGain tags\")\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument(\"--force-album\", action=\"store_true\", help=\"write replaygain_album_{gain,peak} values into replaygain_track_{gain,peak} tags\")\n    group.add_argument(\"--force-track\", action=\"store_true\", help=\"write replaygain_track_{gain,peak} values into replaygain_album_{gain,peak} tags\")\n\n    parser.add_argument(\"files\", nargs=\"+\", metavar=\"FILE | FOLDER\", type=argparse_path_handler, help=\"path to mp3 file(s) or directory(ies)\")\n\n    args = parser.parse_args()\n    main(sys.argv[0], args)\n"
  },
  {
    "path": "rexe",
    "content": "#!/bin/bash\n\nset -e\n\nHOST=\"\"\nLOCAL_PATH=\"\"\nREMOTE_PATH=\"\"\nREXE_DIR=\"rexe\"\nCMD=\"\"\nDOWNLOAD=\"true\"\nEXCLUDE=()\nEXCLUDE_DOWNLOAD=()\n\nfunction handle_argument()\n{\n    if [[ \"$HOST\" == \"\" ]]; then\n        case \"$1\" in\n            *:*)\n                HOST=\"${1%:*}\"\n                REMOTE_PATH=\"${1#*:}\"\n                ;;\n            *)\n                HOST=\"$1\"\n                ;;\n        esac\n        return\n    fi\n    if [[ \"$CMD\" == \"\" ]]; then\n        CMD=\"$1\"\n    else\n        CMD=\"$CMD $1\"\n    fi\n}\n\nwhile [ \"$#\" -gt 0 ]; do\n    if [[ \"$CMD\" == \"\" ]]; then\n        case \"$1\" in\n            --path=*) LOCAL_PATH=\"${1#*=}\"; shift 1;;\n            -p|--path) LOCAL_PATH=\"$2\"; shift 2;;\n            --no-download) DOWNLOAD=\"false\"; shift 1;;\n            --exclude=*) EXCLUDE+=(\"--exclude\" \"${1#*=}\"); shift 1;;\n            --exclude) EXCLUDE+=(\"--exclude\" \"$2\"); shift 2;;\n            --exclude-download=*) EXCLUDE_DOWNLOAD+=(\"--exclude\" \"${1#*=}\"); shift 1;;\n            --exclude-download) EXCLUDE_DOWNLOAD+=(\"--exclude\" \"$2\"); shift 2;;\n\n            -*) echo \"unknown option: $1\" >&2; exit 1;;\n            *) handle_argument \"$1\"; shift 1;;\n        esac\n    else\n        handle_argument \"$1\"\n        shift 1\n    fi\ndone\n\nif [[ \"$HOST\" == \"\" ]]; then\n    echo \"error: remote host was not specified.\" >&2\n    exit 1\nfi\nif [[ \"$CMD\" == \"\" ]]; then\n    echo \"error: remote command was not sepcified.\" >&2\n    exit 1\nfi\n\n# fill in defaults\nif [[ \"$LOCAL_PATH\" == \"\" ]]; then\n    LOCAL_PATH=$(pwd)\nfi\n# NOTE: the tmpfs for $XDG_RUNTIME_DIR may be too small (e.g. only 10% of the available RAM)\n#if [[ \"$REMOTE_PATH\" == \"\" ]]; then\n#    REMOTE_PATH=$(ssh \"$HOST\" echo '$XDG_RUNTIME_DIR')\n#fi\nif [[ \"$REMOTE_PATH\" == \"\" ]]; then\n    REMOTE_PATH=\"/tmp\"\n    REXE_DIR=\"rexe_$(ssh \"$HOST\" whoami)\"\nfi\n\nif [[ ! -d \"$LOCAL_PATH\" ]]; then\n    echo \"error: local path '$LOCAL_PATH' is does not exist or is not a directory.\" >&2\n    exit 1\nfi\n\n# create remote main directory for rexe with restricted permissions\necho \"Creating remote directory '$REMOTE_PATH/$REXE_DIR'...\"\nssh \"$HOST\" mkdir -m 0700 -p \"$REMOTE_PATH/$REXE_DIR\"\n\n# change remote path into full path\n_basename=$(basename \"$LOCAL_PATH\")\nREMOTE_PATH=\"$REMOTE_PATH/$REXE_DIR/$_basename\"\n\necho \"Uploading local directory '$LOCAL_PATH' to remote directory '$REMOTE_PATH'...\"\nrsync -rlptD \"$LOCAL_PATH/\" \"$HOST:$REMOTE_PATH/\" -e ssh -zz --info=progress2 --delete ${EXCLUDE[@]}\n\necho \"Executing remote command '$CMD'...\"\n# ignore errors of the ssh command to always run rsync afterwards (even on keyboard interrupt)\nset +e\nssh -t \"$HOST\" \"cd ${REMOTE_PATH@Q}; bash --login -c -- ${CMD@Q}\"\nset -e\n\nif [[ \"$DOWNLOAD\" != \"false\" ]]; then\n    echo \"Synchronizing remote directory '$REMOTE_PATH' into the local directory...\"\n    # FIXME: EXCLUDE_DOWNLOAD does not work correctly for wildcards\n    echo rsync -rlptD \"$HOST:$REMOTE_PATH/\" \"$LOCAL_PATH/\" -e ssh -zz --info=progress2 --delete ${EXCLUDE[@]} ${EXCLUDE_DOWNLOAD[@]} -v\n    rsync -rlptD \"$HOST:$REMOTE_PATH/\" \"$LOCAL_PATH/\" -e ssh -zz --info=progress2 --delete ${EXCLUDE[@]} ${EXCLUDE_DOWNLOAD[@]} -v\nfi\n"
  },
  {
    "path": "rmshit.py",
    "content": "#! /usr/bin/env python3\n\nimport os\nimport shutil\nfrom pathlib import Path\n\nimport yaml\n\nDEFAULT_CONFIG = \"\"\"\n- ~/.adobe              # Flash crap\n- ~/.macromedia         # Flash crap\n- ~/.recently-used\n- ~/.local/share/recently-used.xbel\n- ~/.thumbnails\n- ~/.gconfd\n- ~/.gconf\n- ~/.local/share/gegl-0.2\n- ~/.FRD/log/app.log   # FRD\n- ~/.FRD/links.txt     # FRD\n- ~/.objectdb          # FRD\n- ~/.gstreamer-0.10\n- ~/.pulse\n- ~/.esd_auth\n- ~/.config/enchant\n- ~/.spicec            # contains only log file; unconfigurable\n- ~/.dropbox-dist\n- ~/.parallel\n- ~/.dbus\n- ~/ca2                # WTF?\n- ~/ca2~               # WTF?\n- ~/.distlib/          # contains another empty dir, don't know which software creates it\n- ~/.bazaar/           # bzr insists on creating files holding default values\n- ~/.bzr.log\n- ~/.nv/\n- ~/.viminfo           # configured to be moved to ~/.cache/vim/viminfo, but it is still sometimes created...\n- ~/.npm/              # npm cache\n- ~/.java/\n- ~/.swt/\n- ~/.oracle_jre_usage/\n- ~/.openjfx/\n- ~/.org.jabref.gui.JabRefMain/\n- ~/.org.jabref.gui.MainApplication/\n- ~/.jssc/\n- ~/.tox/              # cache directory for tox\n- ~/.pylint.d/\n- ~/.qute_test/\n- ~/.QtWebEngineProcess/\n- ~/.qutebrowser/      # created empty, only with webengine backend\n- ~/.asy/\n- ~/.cmake/\n- ~/.gnome/\n- ~/unison.log\n- ~/.texlive/\n- ~/.w3m/\n- ~/.subversion/\n- ~/nvvp_workspace/    # created empty even when the path is set differently in nvvp\n- ~/.ansible/\n- ~/.fltk/\n- ~/.vnc/\n- ~/.local/share/Trash/    # VSCode puts deleted files here\n\"\"\"\n\n\ndef get_size(path):\n    if Path(path).is_dir():\n        return sum(p.stat().st_size for p in Path(path).rglob(\"*\"))\n    return Path(path).stat().st_size\n\n\ndef read_config():\n    \"\"\"\n    Reads the list of shitty files from a YAML config.\n    \"\"\"\n    config_dir = os.getenv(\"XDG_CONFIG_HOME\", os.path.expanduser(\"~/.config/\"))\n    config_path = Path(config_dir) / \"rmshit.yaml\"\n\n    # write default config if it does not exist\n    if not config_path.exists():\n        with open(config_path, \"w\") as f:\n            print(DEFAULT_CONFIG.strip(), file=f)\n\n    with open(config_path, \"r\") as f:\n        return yaml.safe_load(f)\n\n\ndef yesno(question, default=\"n\"):\n    \"\"\"\n    Asks the user for YES or NO, always case insensitive.\n    Returns True for YES and False for NO.\n    \"\"\"\n    prompt = f\"{question} (y/[n]) \"\n\n    ans = input(prompt).strip().lower()\n\n    if not ans:\n        ans = default\n\n    if ans == \"y\":\n        return True\n    return False\n\n\ndef format_size(size_in_bytes):\n    \"\"\"Format file size in bytes to a human-readable string.\"\"\"\n    if size_in_bytes <= 0:\n        return \"0 bytes\"\n\n    units = [\"bytes\", \"KiB\", \"MiB\", \"GiB\"]\n    size = float(size_in_bytes)\n    unit_index = min(int((size_in_bytes.bit_length() - 1) // 10), len(units) - 1)\n    size /= 1024**unit_index\n\n    return f\"{size:.4g} {units[unit_index]}\"\n\n\ndef rmshit():\n    shittyfiles = read_config()\n\n    print(\"Found shittyfiles:\")\n    found = []\n    total_size = 0\n    for f in shittyfiles:\n        absf = os.path.expanduser(f)\n        if os.path.exists(absf):\n            found.append(absf)\n            size = get_size(absf)\n            total_size += size\n            print(f\"    {f} ({format_size(size)})\")\n\n    if len(found) == 0:\n        print(\"No shitty files found :)\")\n        return\n\n    if yesno(\"Remove all?\", 
default=\"n\"):\n        for f in found:\n            if os.path.isfile(f):\n                os.remove(f)\n            else:\n                shutil.rmtree(f)\n        print(f\"All cleaned, {format_size(total_size)} freed.\")\n    else:\n        print(\"No file removed\")\n\n\nif __name__ == \"__main__\":\n    rmshit()\n"
  },
  {
    "path": "run-pvserver",
    "content": "#!/bin/bash\n\nhost=\"$1\"\n\nif [[ \"$host\" == \"\" ]]; then\n    echo \"usage: $0 [user@]hostname\"\n    exit 1\nfi\n\nhostname=$(ssh \"$host\" \"uname -n\")\nport=11111\ndisplay=42\n\n# NOTE: bash -lc is needed to get full $PATH by sourcing /etc/profile.d/*.sh\nssh -C -t -L \"localhost:$port:$hostname:$port\" \"$host\" \"bash -lc 'xvfb-run --server-num=$display mpirun -np 2 pvserver --displays=:$display --server-port=$port'\"\n"
  },
  {
    "path": "sway-sensible-terminal",
    "content": "#! /usr/bin/env python3\n\nimport os\nimport sys\nimport json\nimport subprocess\n\nPATH = os.environ.get(\"PATH\", \"/usr/bin\")\nTERMINAL = os.environ.get(\"TERMINAL\", \"alacritty\")\nARGS = sys.argv[1:]\n\ndef get_cwd(tree):\n    for node in tree.get(\"nodes\", []):\n        if node[\"focused\"]:\n            name = node[\"name\"]\n            for part in name.split(\":\"):\n                if part.startswith(\"/\") and os.path.exists(part):\n                    return part\n        cwd = get_cwd(node)\n        if cwd is not None:\n            return cwd\n\nif os.path.isfile(\"/usr/bin/swaymsg\"):\n    prog = \"swaymsg\"\nelif os.path.isfile(\"/usr/bin/i3-msg\"):\n    prog = \"i3-msg\"\nelse:\n    raise Exception(\"Neither swaymsg or i3-msg was found in /usr/bin/\")\n\ncmd = subprocess.run(f\"{prog} -t get_tree\", shell=True, check=True, capture_output=True)\ntree = json.loads(cmd.stdout)\ncwd = get_cwd(tree)\nif cwd is not None:\n    if \"alacritty\" in TERMINAL:\n        ARGS = [\"--working-directory\", cwd, *ARGS]\n    else:\n        ARGS = [\"-d\", cwd, *ARGS]\n\nfor d in PATH.split(\":\"):\n    path = os.path.join(d, TERMINAL)\n    if not os.path.isfile(path):\n        continue\n\n    os.execl(path, path, *ARGS)\n\nraise Exception(f\"Error: terminal '{TERMINAL}' was not found in $PATH ({PATH})\")\n"
  },
  {
    "path": "teams-attendance-parser.py",
    "content": "#! /usr/bin/env python3\n\n\"\"\"\nTHE BEER-WARE LICENSE (Revision 42):\nJakub Klinkovský wrote this file. As long as you retain this notice you\ncan do whatever you want with this stuff. If we meet some day, and you think\nthis stuff is worth it, you can buy me a beer in return.\n\"\"\"\n\nimport argparse\nimport os.path\nimport datetime\nimport sys\n\n# maybe depends on the locale in which MS Teams runs...\nTIMESTAMP_FORMATS = [\n    \"%m/%d/%Y, %I:%M:%S %p\",\n    \"%d. %m. %Y %H:%M:%S\",\n]\n\nCLASS_LENGTH = datetime.timedelta(minutes=100)\n\ndef parse_timestamp(timestamp):\n    last_error = None\n    for format in TIMESTAMP_FORMATS:\n        try:\n            return datetime.datetime.strptime(timestamp, format)\n        except ValueError as e:\n            last_error = e\n            continue\n    raise last_error\n\ndef parse_attendance_list(path):\n    print(f\"Parsing file {path}...\")\n    data = {}\n    text = open(path, \"r\", encoding=\"utf-16\").read()\n\n    for line in text.splitlines():\n        # parse items on the line\n        name, action, timestamp = line.split(\"\\t\")\n        # skip header line\n        if name == \"Full Name\" or name == \"Celé jméno\":\n            continue\n\n        # validate items\n        assert \",\" in name, name\n        assert action in {\"Joined\", \"Left\", \"Připojeno\", \"Odpojil(a) se\"}, f\"unknown action: {action}\"\n        timestamp = parse_timestamp(timestamp)\n\n        # initialize data\n        user_actions = data.setdefault(name, [])\n\n        # append action\n        user_actions.append((action, timestamp))\n\n    return data\n\ndef get_attendance(class_start, actions):\n    class_end = class_start + CLASS_LENGTH\n\n    # make sure actions are sorted by timestamp\n    actions.sort(key=lambda a: a[1])\n\n    # calculate\n    attendance = datetime.timedelta()\n    joined = None\n    for i, item in enumerate(actions):\n        action, timestamp = item\n        if action in {\"Joined\", \"Připojeno\"}:\n            assert joined is None\n            joined = timestamp\n        elif action in {\"Left\", \"Odpojil(a) se\"}:\n            assert joined is not None\n            attendance += timestamp - joined\n            joined = None\n        else:\n            assert False\n    # handle the missing \"Left\" action\n    if joined is not None:\n        attendance += class_end - joined\n\n    return attendance\n\ndef print_attendance(teacher, class_start, data):\n    print(f\"Class teacher:\\t{teacher}\")\n    print(f\"Class start:\\t{class_start}\")\n    print(\"Attendance:\")\n\n    for name in sorted(data.keys()):\n        attendance = get_attendance(class_start, data[name])\n        perc = attendance.seconds / CLASS_LENGTH.seconds * 100\n        print(f\"   {name:<30}\\t{attendance} ({perc:.0f}%)\")\n\n    print()\n\ndef main(path):\n    data = parse_attendance_list(path)\n    teacher = list(data.keys())[0]\n    class_start = data[teacher][0][1]\n    del data[teacher]\n    print_attendance(teacher, class_start, data)\n\nparser = argparse.ArgumentParser(description=\"parser for MS Teams attendance list files\")\nparser.add_argument(\"path\", nargs=\"+\", help=\"path to the attendance list file\")\n\nargs = parser.parse_args()\nfor p in args.path:\n    if os.path.isfile(p):\n        main(p)\n    else:\n        print(f\"ERROR: {p} is not a file\", file=sys.stderr)\n"
  },
  {
    "path": "toggle-touchpad.sh",
    "content": "#!/bin/sh\n# Toggle touchpad status\n# Using libinput and xinput\n\n# Use xinput list and do a search for touchpads. Then get the first one and get its name.\ndevice=\"$(xinput list | grep -P '(?<= )[\\w\\s:]*(?i)(touchpad|synaptics)(?-i).*?(?=\\s*id)' -o | head -n1)\"\n\n# If it was activated disable it and if it wasn't disable it\n[[ \"$(xinput list-props \"$device\" | grep -P \".*Device Enabled.*\\K.(?=$)\" -o)\" == \"1\" ]] &&\n    xinput disable \"$device\" ||\n    xinput enable \"$device\"\n"
  },
  {
    "path": "touch-tree.py",
    "content": "#! /usr/bin/env python\n\n# Little script to \"touch\" directory structure.\n# Works like 'cp -r', but instead of copying full file, the new file is \"touched\",\n# so the tree structure is preserved and only empty files created.\n\nimport sys\nimport os\n\n\nclass Main:\n    def __init__(self, oldRoot, newRoot):\n        self.oldRoot = oldRoot\n        self.newRoot = newRoot\n\n    def browse(self, path):\n        for file in os.listdir(path):\n            absPath = os.path.join(path, file)\n            relPath = os.path.relpath(absPath, self.oldRoot)\n            if os.path.isdir(absPath):\n                os.mkdir(os.path.join(self.newRoot, relPath))\n                self.browse(absPath)\n            elif os.path.isfile(absPath):\n                open(os.path.join(self.newRoot, relPath), \"w\").close()\n\n    def touchTree(self):\n        os.mkdir(newRoot)\n        self.browse(self.oldRoot)\n\nif len(sys.argv) != 3 or not os.path.isdir(sys.argv[1]) or os.path.exists(sys.argv[2]):\n    sys.exit(1)\n\noldRoot = os.path.abspath(sys.argv[1])\nnewRoot = os.path.abspath(sys.argv[2])\n\nprint(oldRoot + \"  =>  \" + newRoot)\nmain = Main(oldRoot, newRoot)\nmain.touchTree()\n"
  },
  {
    "path": "waybar-khal.py",
    "content": "#! /usr/bin/env python3\n\nimport subprocess\nimport json\n\ndata = {}\n\ncmd = [\n    \"khal\",\n    \"list\",\n    \"now\",\n    \"23:59\",\n    \"--once\",\n    \"--format\",\n    \"{start-time} ({location}) {title}{repeat-symbol}{alarm-symbol}\",\n]\noutput = subprocess.run(cmd, check=True, text=True, capture_output=True).stdout\n\nlines = [line.strip() for line in output.split(\"\\n\")]\n\n# filter out lines that do not start with a number\n# (khal list includes headings like \"Monday, 2025-03-31\" for each day)\nlines = [line for line in lines if line and line[0].isdigit()]\n\nif lines:\n    data[\"text\"] = \" \" + lines[0]\n    data[\"tooltip\"] = \"\\n\".join(lines)\nelse:\n    data[\"text\"] = \"\"\n\nprint(json.dumps(data))\n"
  },
  {
    "path": "x",
    "content": "#! /bin/bash\n\n# Some references:\n# https://wiki.archlinux.org/index.php/Bash#Functions\n# https://github.com/robbyrussell/oh-my-zsh/blob/master/plugins/extract/extract.plugin.zsh\n\nfunction extract() {\n    local remove_archive\n    local success\n    local fname\n    local basename\n    local extension\n\n    success=0\n    fname=$(realpath \"$1\")\n    extension=${fname##*.}\n\n    # remove extension from basename\n    basename=$(basename \"${fname%.*}\")\n\n    # hack to recognize .tar.gz etc as extension\n    if [[ \"${basename##*.}\" == \"tar\" ]]; then\n        extension=\"${basename##*.}.$extension\"\n        basename=$(basename \"${basename%.*}\")\n    fi\n\n    # split \\.part[0-9]* from $basename\n    basename=\"${basename%\\.part[0-9]*}\"\n\n    case \"$extension\" in\n        tar.gz|tgz|tar.bz2|tbz|tbz2|tar.xz|txz|tar.lzma|tlz|tar|tar.zst)\n            mkdir \"$basename\"\n            tar xvf \"$fname\" -C \"$basename\"\n            ;;\n        gz|Z)\n            gzip -dkv \"$fname\"\n            ;;\n        bz2)\n            bzip2 -dkv \"$fname\"\n            ;;\n        xz|lzma)\n            xz -dkv \"$fname\"\n            ;;\n        zst)\n            zstd -dkv \"$fname\"\n            ;;\n        zip)\n            unzip \"$fname\" -d \"$basename\"\n            ;;\n        rar)\n            mkdir \"$basename\"\n            pushd \"$basename\"\n                unrar x \"$fname\"\n            popd\n            ;;\n        7z)\n            7za x \"$fname\" -o\"$basename\"\n            ;;\n        *)\n            echo \"extract: '$fname' cannot be extracted\" 1>&2\n            success=1\n            ;;\n    esac\n\n    [[ $success == 0 ]] && success=$?\n\n    # if destination directory contains only one file/dir, move it to cwd\n    if [[ $success == 0 ]]; then\n        count=$(find \"$basename\" -maxdepth 1 -mindepth 1 | wc -l)\n\n        if [[ $count == 1 ]]; then\n            name=$(basename \"$(find \"$basename\" -maxdepth 1 -mindepth 1)\")\n\n            # can't move ./foo/foo into ./foo\n            if [[ \"$basename\" == \"$name\" ]]; then\n                tmp=\"$name.tmp\"\n            else\n                tmp=\"$name\"\n            fi\n\n            mv \"$basename/$name\" \"$tmp\"\n            rmdir \"$basename\"\n            mv \"$tmp\" \"$name\"\n        fi\n    fi\n}\n\nif [[ $# == 0 ]]; then\n    echo \"Usage: $0 file [file ...]\"\n    exit 1\nfi\n\nwhile [[ $# > 0 ]]; do\n    if [[ -f \"$1\" ]]; then\n        extract \"$1\"\n    else\n        echo \"extract: '$1' is not a valid file\"\n    fi\n    shift\ndone\n"
  }
]