Repository: lahwaacz/Scripts
Branch: master
Commit: afb4233b4c9f
Files: 60
Total size: 158.0 KB
Directory structure:
gitextract_szwjc667/
├── .gitmodules
├── Colours-EyeCandy/
│ ├── colourbars
│ ├── colours
│ ├── colourtheme
│ ├── hypnotoad.pl
│ ├── pacman.sh
│ ├── spacey.sh
│ └── tanks.sh
├── README.md
├── aur-check
├── aur-release
├── aur-remotebuild
├── backup-system.sh
├── batmanpager
├── bsnap.sh
├── btrfs-diff
├── btrfs-sync
├── btrfs-sync-WIP
├── clean-aur-dir.py
├── convertToUtf8.py
├── fatcp
├── ffparser.py
├── fmount.py
├── forcemp3convert.sh
├── hddtemp.sh
├── imap-notifier.py
├── img2pdf.sh
├── initscreen.sh
├── maildir-strip-attachments.py
├── makeissue.sh
├── mp3convert.py
├── nat-launch-subnet.sh
├── nat-launch.sh
├── notify-brightness.sh
├── notify-volume.sh
├── pacman-disowned.sh
├── pdf-extract.sh
├── perm.sh
├── pythonscripts/
│ ├── __init__.py
│ ├── cpu.py
│ ├── daemon.py
│ ├── ffparser.py
│ ├── logger.py
│ ├── misc.py
│ ├── tempfiles.py
│ └── terminal.py
├── qemu-launcher.sh
├── qemu-mac-hasher.py
├── qemu-tap-helper.sh
├── remove-dead-symlinks.sh
├── replaygain.py
├── rexe
├── rmshit.py
├── run-pvserver
├── sway-sensible-terminal
├── teams-attendance-parser.py
├── toggle-touchpad.sh
├── touch-tree.py
├── waybar-khal.py
└── x
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitmodules
================================================
[submodule "submodules/cp-p"]
path = submodules/cp-p
url = ../cp-p.git
================================================
FILE: Colours-EyeCandy/colourbars
================================================
#!/bin/sh
# by Him on the Arch boards
# ANSI Color -- use these variables to easily have different color
# and format output. Make sure to output the reset sequence after
# colors (f = foreground, b = background), and use the 'off'
# feature for anything you turn on.
# Define ANSI attribute/colour variables (f = foreground, b = background).
# FIX: `esc` was an empty string (the literal ESC byte appears to have been
# lost), so the sequences printed as plain text like "[31m".  POSIX sh has no
# $'...' syntax, so generate the ESC byte portably with printf.
initializeANSI()
{
  esc=$(printf '\033')

  blackf="${esc}[30m";  redf="${esc}[31m";   greenf="${esc}[32m"
  yellowf="${esc}[33m"; bluef="${esc}[34m";  purplef="${esc}[35m"
  cyanf="${esc}[36m";   whitef="${esc}[37m"

  blackb="${esc}[40m";  redb="${esc}[41m";   greenb="${esc}[42m"
  yellowb="${esc}[43m"; blueb="${esc}[44m";  purpleb="${esc}[45m"
  cyanb="${esc}[46m";   whiteb="${esc}[47m"

  boldon="${esc}[1m";    boldoff="${esc}[22m"
  italicson="${esc}[3m"; italicsoff="${esc}[23m"
  ulon="${esc}[4m";      uloff="${esc}[24m"
  invon="${esc}[7m";     invoff="${esc}[27m"
  reset="${esc}[0m"
}
# note in this first use that switching colors doesn't require a reset
# first - the new color overrides the old one.
initializeANSI
# Print two rows of colour bars (normal, then bold); the heredoc interpolates
# the variables set by initializeANSI when cat runs.
cat << EOF
${redf}▆▆▆▆▆▆▆▆▆▆${reset} ${greenf}▆▆▆▆▆▆▆▆▆▆${reset} ${yellowf}▆▆▆▆▆▆▆▆▆▆${reset} ${bluef}▆▆▆▆▆▆▆▆▆▆${reset} ${purplef}▆▆▆▆▆▆▆▆▆▆${reset} ${cyanf}▆▆▆▆▆▆▆▆▆▆${reset} ${whitef}▆▆▆▆▆▆▆▆▆▆${reset}
${boldon}${blackf} ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::${reset}
${boldon}${redf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${greenf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${yellowf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${bluef}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${purplef}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${cyanf}▆▆▆▆▆▆▆▆▆▆${reset} ${boldon}${whitef}▆▆▆▆▆▆▆▆▆▆${reset}
EOF
================================================
FILE: Colours-EyeCandy/colours
================================================
#!/bin/bash
# Original: http://frexx.de/xterm-256-notes/
# http://frexx.de/xterm-256-notes/data/colortable16.sh
# Modified by Aaron Griffin
# and further by Kazuo Teramoto
# Column labels (foreground colours) and row labels (backgrounds).
FGNAMES=(' black ' ' red   ' ' green ' ' yellow' ' blue  ' 'magenta' ' cyan  ' ' white ')
BGNAMES=('DFT' 'BLK' 'RED' 'GRN' 'YEL' 'BLU' 'MAG' 'CYN' 'WHT')
echo "     ┌──────────────────────────────────────────────────────────────────────────┐"
for b in $(seq 0 8); do
    # b == 0 leaves $bg unset → "\033[m" uses the default background (DFT row);
    # for b >= 1 the background code is 40..47.
    if [ "$b" -gt 0 ]; then
        bg=$(($b+39))
    fi
    # Normal-intensity row for this background.
    echo -en "\033[0m ${BGNAMES[$b]} │ "
    for f in $(seq 0 7); do
        echo -en "\033[${bg}m\033[$(($f+30))m ${FGNAMES[$f]} "
    done
    echo -en "\033[0m │"
    # Bold row for the same background.
    echo -en "\033[0m\n\033[0m     │ "
    for f in $(seq 0 7); do
        echo -en "\033[${bg}m\033[1;$(($f+30))m ${FGNAMES[$f]} "
    done
    echo -en "\033[0m │"
    echo -e "\033[0m"
    # Separator between background rows (not after the last one).
    if [ "$b" -lt 8 ]; then
        echo "     ├──────────────────────────────────────────────────────────────────────────┤"
    fi
done
echo "     └──────────────────────────────────────────────────────────────────────────┘"
================================================
FILE: Colours-EyeCandy/colourtheme
================================================
#!/bin/bash
#
# This file echoes a bunch of color codes to the
# terminal to demonstrate what's available. Each
# line is the color code of one forground color,
# out of 17 (default + 16 escapes), followed by a
# test use of that color on all nine background
# colors (default + 8 escapes).
#
T='▆ ▆' # The test text
# Header row of background codes; the backslash-newline inside the quotes is a
# line continuation, so a single header line is printed.
echo -e "\n 40m 41m 42m 43m\
 44m 45m 46m 47m";
# One row per foreground attribute/colour combination.
for FGs in ' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' ' 32m' \
'1;32m' ' 33m' '1;33m' ' 34m' '1;34m' ' 35m' '1;35m' \
' 36m' '1;36m' ' 37m' '1;37m';
do FG=${FGs// /}
echo -en " $FGs \033[$FG $T "
# NOTE(review): $EINS is never defined anywhere visible — it expands to the
# empty string and looks vestigial; confirm before removing.
for BG in 40m 41m 42m 43m 44m 45m 46m 47m;
do echo -en "$EINS \033[$FG\033[$BG $T \033[0m";
done
echo;
done
echo
================================================
FILE: Colours-EyeCandy/hypnotoad.pl
================================================
#!/usr/bin/perl
# script by karabaja4
# mail: karabaja4@archlinux.us

# Enable strictures; all variables below are already lexical (my), so this
# only guards against future typos and unintended globals.
use strict;
use warnings;

# ANSI escape sequences ("\e[<FG>;<BG>m" pairs) used to paint the art below.
my $blackFG_yellowBG  = "\e[30;43m";
my $blackFG_redBG     = "\e[30;41m";
my $blackFG_purpleBG  = "\e[30;45m";
my $yellowFG_blackBG  = "\e[1;33;40m";  # bold yellow on black
my $yellowFG_redBG    = "\e[1;33;41m";  # bold yellow on red
my $redFG_yellowBG    = "\e[31;43m";
my $purpleFG_yellowBG = "\e[35;43m";
my $purpleFG_blueBG   = "\e[1;35;44m";  # bold purple on blue
my $end               = "\e[0m";        # reset all attributes

# Clear the terminal before drawing.
system("clear");
# Print the Hypnotoad ASCII art in one interpolated string; variables switch
# colours mid-line and "\@" keeps literal @ signs out of array interpolation.
print "
${blackFG_yellowBG},'${blackFG_redBG}`${blackFG_yellowBG}`.._${end} ${blackFG_yellowBG},'${blackFG_redBG}`${end}${blackFG_yellowBG}`.${end}
${blackFG_yellowBG}:${blackFG_redBG},${yellowFG_blackBG}--.${end}${blackFG_redBG}_${blackFG_yellowBG}:)\\,:${blackFG_redBG},${yellowFG_blackBG}._,${end}${yellowFG_redBG}.${end}${blackFG_yellowBG}:${end}
${blackFG_yellowBG}:`-${yellowFG_blackBG}-${end}${blackFG_yellowBG},${blackFG_yellowBG}''${end}${redFG_yellowBG}@@\@${end}${blackFG_yellowBG}:`.${yellowFG_redBG}.${end}${blackFG_yellowBG}.';\\${end} All Glory to
${blackFG_yellowBG}`,'${end}${redFG_yellowBG}@@@@@@\@${end}${blackFG_yellowBG}`---'${redFG_yellowBG}@\@${end}${blackFG_yellowBG}`.${end} the HYPNOTOAD!
${blackFG_yellowBG}/${redFG_yellowBG}@@@@@@@@@@@@@@@@\@${end}${blackFG_yellowBG}:${end}
${blackFG_yellowBG}/${redFG_yellowBG}@@@@@@@@@@@@@@@@@@\@${end}${blackFG_yellowBG}\\${end}
${blackFG_yellowBG},'${redFG_yellowBG}@@@@@@@@@@@@@@@@@@@@\@${end}${purpleFG_yellowBG}:\\${end}${blackFG_yellowBG}.___,-.${end}
${blackFG_yellowBG}`...,---'``````-..._${redFG_yellowBG}@@@\@${end}${blackFG_purpleBG}|:${end}${redFG_yellowBG}@@@@@@\@${end}${blackFG_yellowBG}\\${end}
${blackFG_yellowBG}( )${end}${redFG_yellowBG}@@\@${end}${blackFG_purpleBG};:${end}${redFG_yellowBG}@@@\@)@@\@${end}${blackFG_yellowBG}\\${end} ${blackFG_yellowBG}_,-.${end}
${blackFG_yellowBG}`. (${end}${redFG_yellowBG}@@\@${end}${blackFG_purpleBG}//${end}${redFG_yellowBG}@@@@@@@@@\@${end}${blackFG_yellowBG}`'${end}${redFG_yellowBG}@@@\@${end}${blackFG_yellowBG}\\${end}
${blackFG_yellowBG}: `.${end}${blackFG_purpleBG}//${end}${redFG_yellowBG}@@)@@@@@@)@@@@@,\@${end}${blackFG_yellowBG};${end}
${blackFG_purpleBG}|`${purpleFG_yellowBG}.${blackFG_yellowBG} ${end}${purpleFG_yellowBG}_${end}${purpleFG_yellowBG},${blackFG_purpleBG}'/${end}${redFG_yellowBG}@@@@@@@)@@@@)@,'\@${end}${blackFG_yellowBG},'${end}
${blackFG_yellowBG}:${end}${blackFG_purpleBG}`.`${end}${purpleFG_yellowBG}-..____..=${end}${blackFG_purpleBG}:.-${end}${blackFG_yellowBG}':${end}${redFG_yellowBG}@@@@@.@@@@\@_,@@,'${end}
${redFG_yellowBG},'${end}${blackFG_yellowBG}\\ ${end}${blackFG_purpleBG}``--....${end}${purpleFG_blueBG}-)='${end}${blackFG_yellowBG} `.${end}${redFG_yellowBG}_,@\@${end}${blackFG_yellowBG}\\${end} ${redFG_yellowBG})@@\@'``._${end}
${redFG_yellowBG}/\@${end}${redFG_yellowBG}_${end}${redFG_yellowBG}\@${end}${blackFG_yellowBG}`.${end}${blackFG_yellowBG} ${end}${blackFG_redBG}(@)${end}${blackFG_yellowBG} /${end}${redFG_yellowBG}@@@@\@${end}${blackFG_yellowBG})${end} ${redFG_yellowBG}; / \\ \\`-.'${end}
${redFG_yellowBG}(@@\@${end}${redFG_yellowBG}`-:${end}${blackFG_yellowBG}`. ${end}${blackFG_yellowBG}`' ___..'${end}${redFG_yellowBG}@\@${end}${blackFG_yellowBG}_,-'${end} ${redFG_yellowBG}|/${end} ${redFG_yellowBG}`.)${end}
${redFG_yellowBG}`-. `.`.${end}${blackFG_yellowBG}``-----``--${end}${redFG_yellowBG},@\@.'${end}
${redFG_yellowBG}|/`.\\`'${end} ${redFG_yellowBG},',');${end}
${redFG_yellowBG}`${end} ${redFG_yellowBG}(/${end} ${redFG_yellowBG}(/${end}
";
================================================
FILE: Colours-EyeCandy/pacman.sh
================================================
#!/bin/sh
# ANSI Color -- use these variables to easily have different color
# and format output. Make sure to output the reset sequence after
# colors (f = foreground, b = background), and use the 'off'
# feature for anything you turn on.
# Define ANSI attribute/colour variables (f = foreground, b = background).
# FIX: `esc` was an empty string (the literal ESC byte appears to have been
# lost), so the sequences printed as plain text like "[31m".  POSIX sh has no
# $'...' syntax, so generate the ESC byte portably with printf.
initializeANSI()
{
  esc=$(printf '\033')

  blackf="${esc}[30m";  redf="${esc}[31m";   greenf="${esc}[32m"
  yellowf="${esc}[33m"; bluef="${esc}[34m";  purplef="${esc}[35m"
  cyanf="${esc}[36m";   whitef="${esc}[37m"

  blackb="${esc}[40m";  redb="${esc}[41m";   greenb="${esc}[42m"
  yellowb="${esc}[43m"; blueb="${esc}[44m";  purpleb="${esc}[45m"
  cyanb="${esc}[46m";   whiteb="${esc}[47m"

  boldon="${esc}[1m";    boldoff="${esc}[22m"
  italicson="${esc}[3m"; italicsoff="${esc}[23m"
  ulon="${esc}[4m";      uloff="${esc}[24m"
  invon="${esc}[7m";     invoff="${esc}[27m"
  reset="${esc}[0m"
}
# note in this first use that switching colors doesn't require a reset
# first - the new color overrides the old one.
clear
initializeANSI
# Pac-Man and six ghosts, normal row then bold row; the heredoc interpolates
# the variables set by initializeANSI when cat runs.
cat << EOF
${yellowf} ▄███████▄${reset}   ${redf} ▄██████▄${reset}   ${greenf} ▄██████▄${reset}   ${bluef} ▄██████▄${reset}   ${purplef} ▄██████▄${reset}   ${cyanf} ▄██████▄${reset}
${yellowf}▄█████████▀▀${reset} ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄${reset} ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄${reset} ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄${reset} ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄${reset} ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}
${yellowf}███████▀${reset}     ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███${reset} ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███${reset} ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███${reset} ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███${reset} ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}
${yellowf}███████▄${reset}     ${redf}████████████${reset} ${greenf}████████████${reset} ${bluef}████████████${reset} ${purplef}████████████${reset} ${cyanf}████████████${reset}
${yellowf}▀█████████▄▄${reset} ${redf}██▀██▀▀██▀██${reset} ${greenf}██▀██▀▀██▀██${reset} ${bluef}██▀██▀▀██▀██${reset} ${purplef}██▀██▀▀██▀██${reset} ${cyanf}██▀██▀▀██▀██${reset}
${yellowf} ▀███████▀${reset}   ${redf}▀   ▀  ▀   ▀${reset} ${greenf}▀   ▀  ▀   ▀${reset} ${bluef}▀   ▀  ▀   ▀${reset} ${purplef}▀   ▀  ▀   ▀${reset} ${cyanf}▀   ▀  ▀   ▀${reset}
${boldon}${yellowf} ▄███████▄   ${redf} ▄██████▄   ${greenf} ▄██████▄   ${bluef} ▄██████▄   ${purplef} ▄██████▄   ${cyanf} ▄██████▄${reset}
${boldon}${yellowf}▄█████████▀▀ ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄ ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄ ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄ ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄ ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}
${boldon}${yellowf}███████▀     ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███ ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███ ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███ ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███ ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}
${boldon}${yellowf}███████▄     ${redf}████████████ ${greenf}████████████ ${bluef}████████████ ${purplef}████████████ ${cyanf}████████████${reset}
${boldon}${yellowf}▀█████████▄▄ ${redf}██▀██▀▀██▀██ ${greenf}██▀██▀▀██▀██ ${bluef}██▀██▀▀██▀██ ${purplef}██▀██▀▀██▀██ ${cyanf}██▀██▀▀██▀██${reset}
${boldon}${yellowf} ▀███████▀   ${redf}▀   ▀  ▀   ▀ ${greenf}▀   ▀  ▀   ▀ ${bluef}▀   ▀  ▀   ▀ ${purplef}▀   ▀  ▀   ▀ ${cyanf}▀   ▀  ▀   ▀${reset}
EOF
================================================
FILE: Colours-EyeCandy/spacey.sh
================================================
#!/bin/bash
#ANSI color scheme script featuring Space Invaders
#
# Original: http://crunchbanglinux.org/forums/post/126921/#p126921
# Modified by lolilolicon
f=3 b=4
for j in f b; do
for i in {0..7}; do
eval ${j}${i}=\$\'\\e\[${!j}${i}m\'
done
done
bld=$'\e[1m'
rst=$'\e[0m'
cat << EOF
$f0 ▄██▄ $f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$f0▄█▀██▀█▄ $f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄ $rst
$f0▀▀█▀▀█▀▀ $f1█▀███████▀█ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4█▀███████▀█ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀ $rst
$f0▄▀▄▀▀▄▀▄ $f1▀ ▀▄▄ ▄▄▀ ▀ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄ $rst
$bld $f0 ▄██▄ $f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$bld $f0▄█▀██▀█▄ $f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$bld $f0▀▀█▀▀█▀▀ $f1█▀███████▀█ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4█▀███████▀█ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst
$bld $f0▄▀▄▀▀▄▀▄ $f1▀ ▀▄▄ ▄▄▀ ▀ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst
$f7▌$rst
$f7▌$rst
$f7 ▄█▄ $rst
$f7▄█████████▄$rst
$f7▀▀▀▀▀▀▀▀▀▀▀$rst
EOF
================================================
FILE: Colours-EyeCandy/tanks.sh
================================================
#!/bin/sh
# ANSI Color -- use these variables to easily have different color
# and format output. Make sure to output the reset sequence after
# colors (f = foreground, b = background), and use the 'off'
# feature for anything you turn on.
# Define ANSI attribute/colour variables (f = foreground, b = background).
# FIX: `esc` was an empty string (the literal ESC byte appears to have been
# lost), so the sequences printed as plain text like "[31m".  POSIX sh has no
# $'...' syntax, so generate the ESC byte portably with printf.
initializeANSI()
{
  esc=$(printf '\033')

  blackf="${esc}[30m";  redf="${esc}[31m";   greenf="${esc}[32m"
  yellowf="${esc}[33m"; bluef="${esc}[34m";  purplef="${esc}[35m"
  cyanf="${esc}[36m";   whitef="${esc}[37m"

  blackb="${esc}[40m";  redb="${esc}[41m";   greenb="${esc}[42m"
  yellowb="${esc}[43m"; blueb="${esc}[44m";  purpleb="${esc}[45m"
  cyanb="${esc}[46m";   whiteb="${esc}[47m"

  boldon="${esc}[1m";    boldoff="${esc}[22m"
  italicson="${esc}[3m"; italicsoff="${esc}[23m"
  ulon="${esc}[4m";      uloff="${esc}[24m"
  invon="${esc}[7m";     invoff="${esc}[27m"
  reset="${esc}[0m"
}
# note in this first use that switching colors doesn't require a reset
# first - the new color overrides the old one.
initializeANSI
# Six tanks, bold row then normal row; the heredoc interpolates the variables
# set by initializeANSI when cat runs.
cat << EOF
${boldon}${redf}    █    ${reset}  ${boldon}${greenf}    █    ${reset}  ${boldon}${yellowf}    █    ${reset}  ${boldon}${bluef}    █    ${reset}  ${boldon}${purplef}    █    ${reset}  ${boldon}${cyanf}    █    ${reset}
${boldon}${redf}▄▄  █  ▄▄${reset}  ${boldon}${greenf}▄▄  █  ▄▄${reset}  ${boldon}${yellowf}▄▄  █  ▄▄${reset}  ${boldon}${bluef}▄▄  █  ▄▄${reset}  ${boldon}${purplef}▄▄  █  ▄▄${reset}  ${boldon}${cyanf}▄▄  █  ▄▄${reset}
${boldon}${redf}███▀▀▀███${reset}  ${boldon}${greenf}███▀▀▀███${reset}  ${boldon}${yellowf}███▀▀▀███${reset}  ${boldon}${bluef}███▀▀▀███${reset}  ${boldon}${purplef}███▀▀▀███${reset}  ${boldon}${cyanf}███▀▀▀███${reset}
${boldon}${redf}███ █ ███${reset}  ${boldon}${greenf}███ █ ███${reset}  ${boldon}${yellowf}███ █ ███${reset}  ${boldon}${bluef}███ █ ███${reset}  ${boldon}${purplef}███ █ ███${reset}  ${boldon}${cyanf}███ █ ███${reset}
${boldon}${redf}██ ▀▀▀ ██${reset}  ${boldon}${greenf}██ ▀▀▀ ██${reset}  ${boldon}${yellowf}██ ▀▀▀ ██${reset}  ${boldon}${bluef}██ ▀▀▀ ██${reset}  ${boldon}${purplef}██ ▀▀▀ ██${reset}  ${boldon}${cyanf}██ ▀▀▀ ██${reset}
${redf}    █    ${reset}  ${greenf}    █    ${reset}  ${yellowf}    █    ${reset}  ${bluef}    █    ${reset}  ${purplef}    █    ${reset}  ${cyanf}    █    ${reset}
${redf}▄▄  █  ▄▄${reset}  ${greenf}▄▄  █  ▄▄${reset}  ${yellowf}▄▄  █  ▄▄${reset}  ${bluef}▄▄  █  ▄▄${reset}  ${purplef}▄▄  █  ▄▄${reset}  ${cyanf}▄▄  █  ▄▄${reset}
${redf}███▀▀▀███${reset}  ${greenf}███▀▀▀███${reset}  ${yellowf}███▀▀▀███${reset}  ${bluef}███▀▀▀███${reset}  ${purplef}███▀▀▀███${reset}  ${cyanf}███▀▀▀███${reset}
${redf}███ █ ███${reset}  ${greenf}███ █ ███${reset}  ${yellowf}███ █ ███${reset}  ${bluef}███ █ ███${reset}  ${purplef}███ █ ███${reset}  ${cyanf}███ █ ███${reset}
${redf}██ ▀▀▀ ██${reset}  ${greenf}██ ▀▀▀ ██${reset}  ${yellowf}██ ▀▀▀ ██${reset}  ${bluef}██ ▀▀▀ ██${reset}  ${purplef}██ ▀▀▀ ██${reset}  ${cyanf}██ ▀▀▀ ██${reset}
EOF
================================================
FILE: README.md
================================================
A bunch of scripts I keep in `~/Scripts`, which is included in `$PATH`.
================================================
FILE: aur-check
================================================
#! /usr/bin/env python3
"""
Check the repo for problems and new package versions
"""
import subprocess
from pathlib import Path

import tomlkit.toml_file  # third-party: used to edit the nvchecker TOML config

# Package-source roots to scan.  Each entry may also carry optional keys read
# by nvchecker() below: "nvchecker_overwrite", "nvchecker_host",
# "nvchecker_gitlab_format".
SOURCE_DIRS = [
    {
        "path": Path("~/Arch/packaging/aur/").expanduser(),
        "nvchecker_source": "aur",
    },
]

# nvchecker configuration file that nvchecker() keeps in sync with SOURCE_DIRS.
NVCHECKER_CONFIG_FILE = Path("~/Arch/packaging/aur/nvchecker.toml").expanduser()
def get_from_SRCINFO(path, key):
    """Return the value of the first ``key = value`` entry in a .SRCINFO file.

    Blank lines, comment lines and malformed lines (no ``=``) are skipped.

    :param path: path to the .SRCINFO file
    :param key: key to look up (compared after stripping whitespace)
    :returns: the stripped value, or ``None`` when the key is not found
    """
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            # FIX: a non-empty line without "=" previously raised ValueError
            # on tuple unpacking; skip such malformed lines instead.
            if "=" not in line:
                continue
            k, v = line.split("=", 1)
            if k.strip() == key:
                return v.strip()
    return None
def get_from_PKGBUILD(path, key):
    """Extract a simple ``key=value`` assignment from a PKGBUILD.

    Only lines that literally start with ``key=`` are considered.  A single
    pair of surrounding quotes (first ``'``, then ``"``) is removed from the
    value.  Returns ``None`` when no matching assignment is found.
    """
    prefix = f"{key}="
    with open(path, "r") as fh:
        for raw in fh:
            if not raw.startswith(prefix):
                continue
            rhs = raw.partition("=")[2]
            value = rhs.strip()
            # Strip a matching pair of surrounding quotes, if present.
            for quote in ("'", '"'):
                if value.startswith(quote) and value.endswith(quote):
                    value = value[1:-1]
            return value
    return None
def nvchecker():
    """Updates ``nvchecker`` config file with the sources defined in ``SOURCE_DIRS``
    and then runs ``nvchecker``.
    """
    for src in SOURCE_DIRS:
        root_path = src["path"]
        # read the config file
        config_file = tomlkit.toml_file.TOMLFile(NVCHECKER_CONFIG_FILE)
        config = config_file.read()
        # iterate over package directories in the source root
        for pkg in root_path.iterdir():
            if not pkg.is_dir():
                continue
            elif not (pkg / "PKGBUILD").is_file():
                print(f"WARNING: PKGBUILD not found in {pkg}")
                continue
            # extract from .SRCINFO if it exists
            if (pkg / ".SRCINFO").is_file():
                pkgname = get_from_SRCINFO(pkg / ".SRCINFO", "pkgname")
                # pkgver = get_from_SRCINFO(pkg / ".SRCINFO", "pkgver")
            else:
                # extract pkgname and pkgver from PKGBUILD in the most hackish way
                pkgname = pkg.name
                # pkgname = get_from_PKGBUILD(pkg / "PKGBUILD", "pkgname")
                # pkgver = get_from_PKGBUILD(pkg / "PKGBUILD", "pkgver")
            # ensure that a TOML table for the pkgname exists
            if pkgname not in config:
                config.add(pkgname, tomlkit.table())
                # newly added packages always get their source written
                update_config = True
            else:
                # existing entries are overwritten unless the source dir opts out
                update_config = src.get("nvchecker_overwrite", True)
            # update the config file
            if update_config:
                source = src["nvchecker_source"]
                config[pkgname]["source"] = source
                if source in {"aur", "archpkg"}:
                    # these nvchecker sources take the package name as lookup key
                    config[pkgname][source] = pkgname
                elif source == "gitlab":
                    # NOTE(review): assumes "nvchecker_host" and
                    # "nvchecker_gitlab_format" are present for gitlab sources;
                    # no entry in SOURCE_DIRS currently exercises this branch.
                    config[pkgname]["host"] = src["nvchecker_host"]
                    config[pkgname]["gitlab"] = src["nvchecker_gitlab_format"].format(
                        remote_pkgname=pkgname
                    )
        # write the config file (once per source root)
        config_file.write(config)
    # run nvchecker
    subprocess.run(["nvchecker", "-c", NVCHECKER_CONFIG_FILE], check=True)
def check():
    """Run all repo checks: sync and run nvchecker, then rebuild detection."""
    nvchecker()
    # TODO: check if rebuild-detector is installed
    print("Checking packages that need to be rebuilt...")
    # `checkrebuild` is provided by rebuild-detector; -i restricts the report
    # to packages by the given packager
    subprocess.run(["checkrebuild", "-i", "lahwaacz"], check=True)
    # TODO: list packages that are in the database, but package file is deleted or source is missing


if __name__ == "__main__":
    check()
================================================
FILE: aur-release
================================================
#!/bin/bash
#
# SPDX-License-Identifier: GPL-3.0-or-later
# aur-release - release an already built AUR package
# (based on commitpkg from devtools)
set -o errexit
shopt -s nullglob
readonly argv0=release

# Helper libraries from devtools and makepkg (die, msg, parseopts, ...).
source /usr/share/devtools/lib/common.sh
source /usr/share/devtools/lib/util/srcinfo.sh
source /usr/share/makepkg/util/parseopts.sh
source /usr/share/makepkg/util/util.sh
set -eo pipefail

# Must be run from a PKGBUILD directory.
if [[ ! -f PKGBUILD ]]; then
    echo "No PKGBUILD in the current directory!" >&2
    exit 1
fi

# Check if releasing from a branch
if ! branchname=$(git symbolic-ref --short HEAD); then
    die 'not on any branch'
fi
if [[ "$branchname" != master ]]; then
    die 'must be run from the master branch'
fi

# default arguments
server=pkgbuild.com
remote_path=public_html/repo
rsyncopts=("${RSYNC_OPTS[@]}" --perms --chmod='u=rw,go=r')
release_commit=1   # stage + commit by default
release_push=0     # enabled by --push
release_upload=0   # enabled by --upload
# Print the help text to stderr and exit non-zero.
usage() {
    echo >&2 "Usage: $argv0 [OPTIONS]"
    echo >&2 ""
    echo >&2 "Run this script in a PKGBUILD dir to release an already built package."
    echo >&2 ""
    echo >&2 "The script comprises the following operations:"
    echo >&2 ""
    echo >&2 "- (default) modified version-controlled files are staged for commit"
    echo >&2 "- (default) all build artifacts are signed with gpg"
    echo >&2 "- (optional) commits are pushed to the remote git repository"
    echo >&2 "- (optional) build artifacts are uploaded to the binary repository"
    echo >&2 ""
    echo >&2 "OPTIONS"
    echo >&2 " --no-commit Do not stage version-controlled files for commit"
    echo >&2 " --push Automatically push commits to the remote git repository"
    echo >&2 " --upload Automatically upload all build artifacts to the binary"
    echo >&2 " repository hosted at $server:$remote_path"
    exit 1
}
## option parsing
opt_short=''
opt_long=('no-commit' 'push' 'upload')
opt_hidden=()

# parseopts (from makepkg) validates and normalizes options into OPTRET.
if ! parseopts "$opt_short" "${opt_long[@]}" "${opt_hidden[@]}" -- "$@"; then
    usage
fi
set -- "${OPTRET[@]}"

# Consume the normalized options; '--' terminates the option list.
while true; do
    case "$1" in
        --no-commit)
            release_commit=0
            ;;
        --push)
            release_push=1
            ;;
        --upload)
            release_upload=1
            ;;
        --)
            shift;
            break
            ;;
    esac
    shift
done
# Validate that the PKGBUILD uses at least one secure checksum algorithm.
# Currently unused below (see the NOTE near write_srcinfo_file); kept for
# parity with devtools' commitpkg.
check_pkgbuild_validity() {
    # shellcheck source=/usr/share/pacman/PKGBUILD.proto
    . ./PKGBUILD
    # skip when there are no sources available
    if (( ! ${#source[@]} )); then
        return
    fi
    # validate sources hash algo is at least > sha1
    local bad_algos=("cksums" "md5sums" "sha1sums")
    local good_hash_algo=false
    # from makepkg libmakepkg/util/schema.sh
    # (known_hash_algos and in_array come from the sourced makepkg libraries)
    for integ in "${known_hash_algos[@]}"; do
        local sumname="${integ}sums"
        if [[ -n ${!sumname} ]] && ! in_array "${sumname}" "${bad_algos[@]}"; then
            good_hash_algo=true
            break
        fi
    done
    if ! $good_hash_algo; then
        die "PKGBUILD lacks a secure cryptographic checksum, insecure algorithms: ${bad_algos[*]}"
    fi
}
# Source makepkg.conf; fail if it is not found
if [[ -r '/etc/makepkg.conf' ]]; then
    source '/etc/makepkg.conf'
else
    die '/etc/makepkg.conf not found!'
fi

# Source user-specific makepkg.conf overrides (XDG location wins)
if [[ -r "${XDG_CONFIG_HOME:-$HOME/.config}/pacman/makepkg.conf" ]]; then
    # shellcheck source=/dev/null
    source "${XDG_CONFIG_HOME:-$HOME/.config}/pacman/makepkg.conf"
elif [[ -r "$HOME/.makepkg.conf" ]]; then
    # shellcheck source=/dev/null
    source "$HOME/.makepkg.conf"
fi

# Reset source() before sourcing the PKGBUILD so a PKGBUILD without sources
# does not inherit a stale value from the environment.
source=()
# shellcheck source=/usr/share/pacman/PKGBUILD.proto
. ./PKGBUILD
pkgbase=${pkgbase:-$pkgname}

# Make sure exported PGP keys for validpgpkeys() are present and committed.
if (( ${#validpgpkeys[@]} != 0 )); then
    if [[ -d keys ]]; then
        for key in "${validpgpkeys[@]}"; do
            if [[ ! -f keys/pgp/$key.asc ]]; then
                export-pkgbuild-keys || die 'Failed to export valid PGP keys for source files'
            fi
        done
    else
        export-pkgbuild-keys || die 'Failed to export valid PGP keys for source files'
    fi
    git add --force -- keys/pgp/*
fi

# find files which should be under source control:
# the PKGBUILD itself plus all local (non-URL) sources
needsversioning=(PKGBUILD)
for s in "${source[@]}"; do
    [[ $s != *://* ]] && needsversioning+=("$s")
done
# changelog= and install= files referenced by the PKGBUILD
for i in 'changelog' 'install'; do
    while read -r file; do
        # evaluate any bash variables used
        # shellcheck disable=SC2001
        eval "file=\"$(sed "s/^\(['\"]\)\(.*\)\1\$/\2/" <<< "$file")\""
        needsversioning+=("$file")
    done < <(sed -n "s/^[[:space:]]*$i=//p" PKGBUILD)
done
# exported PGP key files
for key in "${validpgpkeys[@]}"; do
    needsversioning+=("keys/pgp/$key.asc")
done

# assert that they really are controlled by git
if (( ${#needsversioning[*]} )); then
    for file in "${needsversioning[@]}"; do
        # skip non-existing files
        if [[ ! -f "${file}" ]]; then
            continue
        fi
        if ! git ls-files --error-unmatch "$file"; then
            die "%s is not under version control" "$file"
        fi
    done
fi

# check cached packages (including the -debug split package) for validity
for _arch in "${arch[@]}"; do
    for _pkgname in "${pkgname[@]}"; do
        fullver=$(get_full_version "$_pkgname")
        if pkgfile=$(find_cached_package "$_pkgname" "$fullver" "$_arch"); then
            check_package_validity "$pkgfile"
        fi
    done
    fullver=$(get_full_version "$pkgbase")
    if pkgfile=$(find_cached_package "$pkgbase-debug" "$fullver" "$_arch"); then
        check_package_validity "$pkgfile"
    fi
done

# NOTE: not a reality on the AUR...
# check for PKGBUILD standards
#check_pkgbuild_validity

# auto generate .SRCINFO
# shellcheck disable=SC2119
write_srcinfo_file
if (( release_commit )); then
    # .SRCINFO may be in .gitignore; force-add the regenerated copy.
    git add --force .SRCINFO
    # Only commit when there are changes to tracked files.
    if [[ -n $(git status --porcelain --untracked-files=no) ]]; then
        stat_busy 'Staging files'
        # NOTE(review): the unquoted expansions word-split, so this breaks on
        # filenames containing whitespace — same behaviour as upstream commitpkg.
        for f in $(git ls-files --modified); do
            git add "$f"
        done
        for f in $(git ls-files --deleted); do
            git rm "$f"
        done
        stat_done
        msgtemplate="upgpkg: $(get_full_version)"
        if [[ -n $1 ]]; then
            # Commit message supplied on the command line.
            stat_busy 'Committing changes'
            git commit -q -m "${msgtemplate}: ${1}" || die
            stat_done
        else
            # No message given: open an editor on a pre-filled template.
            [[ -z ${WORKDIR:-} ]] && setup_workdir
            msgfile=$(mktemp --tmpdir="${WORKDIR}" commitpkg.XXXXXXXXXX)
            echo "$msgtemplate" > "$msgfile"
            # Editor resolution order: $GIT_EDITOR, git core.editor, $VISUAL, $EDITOR.
            if [[ -n $GIT_EDITOR ]]; then
                $GIT_EDITOR "$msgfile" || die
            elif giteditor=$(git config --get core.editor); then
                $giteditor "$msgfile" || die
            elif [[ -n $VISUAL ]]; then
                $VISUAL "$msgfile" || die
            elif [[ -n $EDITOR ]]; then
                $EDITOR "$msgfile" || die
            else
                die "No usable editor found (tried \$GIT_EDITOR, git config [core.editor], \$VISUAL, \$EDITOR)."
            fi
            # Abort on an empty commit message.
            [[ -s $msgfile ]] || die
            stat_busy 'Committing changes'
            git commit -v -q -F "$msgfile" || die
            unlink "$msgfile"
            stat_done
        fi
    fi
    if (( release_push )); then
        git_remote_branch=$(git rev-parse --abbrev-ref --symbolic-full-name "@{u}")
        git_remote=${git_remote_branch%/*}
        git_remote_url=$(git remote get-url "$git_remote")
        msg 'Fetching remote changes'
        git fetch --prune --prune-tags origin || die 'failed to fetch remote changes'
        # Check if local branch is up to date and contains the latest origin commit
        if remoteref=$(git rev-parse "$git_remote_branch" 2>/dev/null); then
            if [[ $(git branch "$branchname" --contains "$remoteref" --format '%(refname:short)') != "$branchname" ]]; then
                die "local branch is out of date, run 'git pull --rebase'"
            fi
        fi
        msg "Pushing commits to $git_remote_branch where $git_remote is $git_remote_url"
        git push --tags --set-upstream "$git_remote" "$branchname" || abort
    else
        warning "Not pushing commits because --push was not given."
    fi
elif (( release_push )); then
    warning "Not pushing commits because --no-commit was given."
fi
# Collect the built package files (and their debug split packages) to release.
declare -a uploads
for _arch in "${arch[@]}"; do
    for _pkgname in "${pkgname[@]}"; do
        fullver=$(get_full_version "$_pkgname")
        if ! pkgfile=$(find_cached_package "$_pkgname" "$fullver" "${_arch}"); then
            warning "Skipping %s: failed to locate package file" "$_pkgname-$fullver-$_arch"
            continue 2
        fi
        uploads+=("$pkgfile")
    done
    fullver=$(get_full_version "$pkgbase")
    if ! pkgfile=$(find_cached_package "$pkgbase-debug" "$fullver" "$_arch"); then
        continue
    fi
    if ! is_debug_package "$pkgfile"; then
        continue
    fi
    uploads+=("$pkgfile")
done

# Sign any unsigned package and verify every signature.  Appending to
# `uploads` inside the loop is safe: "${uploads[@]}" is expanded once before
# the loop starts, so only the original package files are iterated.
for pkgfile in "${uploads[@]}"; do
    sigfile="${pkgfile}.sig"
    if [[ ! -f $sigfile ]]; then
        msg "Signing package %s..." "${pkgfile}"
        if [[ -n $GPGKEY ]]; then
            SIGNWITHKEY=(-u "${GPGKEY}")
        fi
        gpg --detach-sign --use-agent --no-armor "${SIGNWITHKEY[@]}" "${pkgfile}" || die
    fi
    if ! gpg --verify "$sigfile" "$pkgfile" >/dev/null 2>&1; then
        die "Signature %s is incorrect!" "$sigfile"
    fi
    uploads+=("$sigfile")
done

if (( release_upload )) && [[ ${#uploads[*]} -gt 0 ]]; then
    new_uploads=()
    # convert to absolute paths so rsync can work with colons (epoch)
    while read -r -d '' upload; do
        new_uploads+=("$upload")
    done < <(realpath -z "${uploads[@]}")
    uploads=("${new_uploads[@]}")
    unset new_uploads
    msg 'Uploading all package and signature files'
    rsync "${rsyncopts[@]}" "${uploads[@]}" "$server:$remote_path/" || die
    # convert to remote paths (signature files travel alongside their packages)
    declare -a remote_pkgfiles
    for pkgfile in "${uploads[@]}"; do
        if ! [[ "$pkgfile" = *.sig ]]; then
            remote_pkgfiles+=("$remote_path"/"$(basename "$pkgfile")")
        fi
    done
    msg 'Updating remote pacman database'
    ssh -t "${SSH_OPTS[@]}" -- "$server" "./repo add ${remote_pkgfiles[*]@Q} && ./repo update"
fi
================================================
FILE: aur-remotebuild
================================================
#!/bin/bash
#
# SPDX-License-Identifier: GPL-3.0-or-later
# aur-remotebuild - build packages remotely using aur-chroot
# (partly based on offload-build from devtools)
set -o errexit
shopt -s nullglob
readonly argv0=remotebuild

# Must be run from a PKGBUILD directory.
if [[ ! -f PKGBUILD ]]; then
    echo "No PKGBUILD in the current directory!" >&2
    exit 1
fi

# default arguments
repo_name=lahwaacz
chroot_args=(
    --create
    --update
    --build
    # makechrootpkg options
    --checkpkg
    --namcap
)

# Print usage to stderr and exit non-zero.
usage() {
    printf >&2 'Usage: %s HOSTNAME [--repo NAME] [--inspect never|always|failure] [--] <aur-chroot args>\n' "$argv0"
    exit 1
}

# FIX: with `set -o errexit`, `shift` on an empty argument list fails and
# aborts the script before `usage` can run.  Check $# before consuming it.
if (( $# < 1 )); then
    usage
fi
server="$1"
shift
if [[ "$server" == "" ]]; then
    usage
fi
source /usr/share/makepkg/util/parseopts.sh

## option parsing
opt_short='d:'
# FIX: '--repo' is handled in the case statement below (and documented in
# usage) but was missing from opt_long, so parseopts rejected it outright.
# The trailing ':' marks options that take an argument.
opt_long=('repo:' 'inspect:')
opt_hidden=()
if ! parseopts "$opt_short" "${opt_long[@]}" "${opt_hidden[@]}" -- "$@"; then
    usage
fi
set -- "${OPTRET[@]}"

# Consume the normalized options; '--' terminates the option list.
while true; do
    case "$1" in
        # -d is the declared short form of --repo (it previously fell through
        # unhandled)
        -d|--repo)
            shift;
            repo_name=$1
            ;;
        --inspect)
            shift;
            # forward to makechrootpkg as "-x <when>"
            chroot_args+=(--makechrootpkg-args="-x,$1")
            ;;
        --)
            shift;
            break
            ;;
    esac
    shift
done
# pass db name to aur-chroot
chroot_args+=(
    --pacman-conf "/etc/aurutils/pacman-$repo_name.conf"
    --makepkg-conf "/etc/aurutils/makepkg-$repo_name.conf"
)

# pass remaining arguments to aur-chroot
if (($#)); then
    chroot_args+=("$@")
fi

# aur chroot command to run remotely (executed later over ssh, quoted with @Q)
aur_chroot_cmd=(
    env
    # aur-chroot does not preserve SRCDEST and LOGDEST by default https://github.com/aurutils/aurutils/issues/1196
    AUR_PACMAN_AUTH="sudo --preserve-env=GNUPGHOME,SSH_AUTH_SOCK,SRCDEST,PKGDEST,LOGDEST"
    # NOTE: do not clear SRCDEST to use cached directory set in the user's makepkg.conf on the remote host
    #SRCDEST=""
    PKGDEST=""
    LOGDEST=""
    aur
    chroot
    "${chroot_args[@]}"
)
### offload-build-like part
# Helper libraries from devtools and makepkg.
source /usr/share/devtools/lib/common.sh
source /usr/share/devtools/lib/util/makepkg.sh
source /usr/share/devtools/lib/util/srcinfo.sh
source /usr/share/makepkg/util/config.sh

[[ -z ${WORKDIR:-} ]] && setup_workdir
TEMPDIR=$(mktemp --tmpdir="${WORKDIR}" --directory aur-remotebuild.XXXXXXXXXX)
export TEMPDIR

# Load makepkg.conf variables to be available
# shellcheck disable=SC2119
load_makepkg_config

# Use a source-only tarball as an intermediate to transfer files. This
# guarantees the checksums are okay, and guarantees that all needed files are
# transferred, including local sources, install scripts, and changelogs.
export SRCPKGDEST="${TEMPDIR}"
makepkg_source_package || die "unable to make source package"

# Temporary cosmetic workaround makepkg if SRCDEST is set somewhere else
# but an empty src dir is created in PWD. Remove once fixed in makepkg.
rmdir --ignore-fail-on-non-empty src 2>/dev/null || true

# Create a temporary directory on the server
# (the remote command below is a quoted string — kept byte-identical)
remote_temp=$(
ssh "${SSH_OPTS[@]}" -- "$server" '
temp="${XDG_CACHE_HOME:-$HOME/.cache}/aur-remotebuild" &&
mkdir -p "$temp" &&
mktemp --directory --tmpdir="$temp"
')

# Transfer the srcpkg to the server
msg "Transferring source package to the server..."
# nullglob is set above, so the glob expands to the freshly built srcpkg only
_srcpkg=("$SRCPKGDEST"/*"$SRCEXT")
srcpkg="${_srcpkg[0]}"
rsync "${RSYNC_OPTS[@]}" -- "$srcpkg" "$server":"$remote_temp" || die

# Prepare the srcpkg on the server
msg "Extracting srcpkg"
ssh "${SSH_OPTS[@]}" -- "$server" "cd ${remote_temp@Q} && bsdtar --strip-components 1 -xvf $(basename "$srcpkg")" || die

# Run the build command on the server
msg "Running aur chroot ${chroot_args[*]}"
# shellcheck disable=SC2145
if ssh "${SSH_OPTS[@]}" -t -- "$server" "cd ${remote_temp@Q} && ${aur_chroot_cmd[@]@Q}"; then
msg "Build complete"
# Get an array of files that should be downloaded from the server:
# the expected package files, the PKGBUILD (pkgver may have changed) and logs
mapfile -t files < <(
ssh "${SSH_OPTS[@]}" -- "$server" "
cd ${remote_temp@Q}"' &&
while read -r file; do
[[ -f "${file}" ]] && printf "%s\n" "${file}" ||:
done < <(SRCDEST="" PKGDEST="" LOGDEST="" makepkg --packagelist) &&
printf "%s\n" '"${remote_temp@Q}/PKGBUILD"'
find '"${remote_temp@Q}"' -name "*.log"
')
else
# Build failed, only the logs should be downloaded from the server
mapfile -t files < <(
ssh "${SSH_OPTS[@]}" -- "$server" '
find '"${remote_temp@Q}"' -name "*.log"
')
fi
if (( ${#files[@]} )); then
msg 'Downloading files...'
rsync "${RSYNC_OPTS[@]}" -- "${files[@]/#/$server:}" "${TEMPDIR}/" || die
if is_globfile "${TEMPDIR}"/*.log; then
# shellcheck disable=SC2031
mv "${TEMPDIR}"/*.log "${LOGDEST:-${PWD}}/"
fi
if is_globfile "${TEMPDIR}"/*.pkg.tar*; then
# Building a package may change the PKGBUILD during update_pkgver
# shellcheck disable=SC2031
mv "${TEMPDIR}/PKGBUILD" "${PWD}/"
# shellcheck disable=SC2031
mv "${TEMPDIR}"/*.pkg.tar* "${PKGDEST:-${PWD}}/"
else
# shellcheck disable=SC2031
error "Build failed, check logs in ${LOGDEST:-${PWD}}"
exit 1
fi
# auto generate .SRCINFO
# shellcheck disable=SC2119
write_srcinfo_file
msg "Removing remote temporary directory $remote_temp"
ssh "${SSH_OPTS[@]}" -- "$server" "rm -rf -- ${remote_temp@Q}"
else
exit 1
fi
================================================
FILE: backup-system.sh
================================================
#!/bin/bash
# Mirror local Btrfs snapshot trees onto the external backup drive
# using btrfs-sync (requires root for the actual transfers).
set -e # abort as soon as any command fails

backupdir="/media/WD-black/backups"

# Refuse to run when the target directory is missing (drive not mounted).
if [[ ! -d "$backupdir" ]]; then
    echo "Backup directory $backupdir does not exist. Is the drive mounted?"
    exit 1
fi

# Previous rsync-based approach, kept for reference:
#homedir="$backupdir/home_rsync_copy"
#rootdir="$backupdir/root_rsync_copy"
#echo "Syncing / to $rootdir (root permissions required)"
#sudo rsync / "$rootdir" -aPhAHX --info=progress2,name0,stats2 --delete --exclude={"/dev/*","/proc/*","/sys/*","/tmp/*","/run/*","/mnt/*","/media/*","/lost+found","/home","/swapfile","/.snapshots"}
#echo "Syncing ~/ to $homedir"
#rsync ~/ $homedir -aPhAHX --one-file-system --info=progress2,name0,stats2 --delete

# TODO:
# - make snapshot with snapper just before btrfs-sync
# - run `sync` before btrfs-sync to make sure that the snapshot is fully written to the disk
# - copy the snapper metadata files (info.xml)
# - make snapshots of the remaining subvolumes: @postgres @nspawn_containers @var_log

# Each entry pairs a local snapshot directory with its subdirectory on the drive.
for pair in "/.snapshots root" "/home/.snapshots home"; do
    read -r src dest <<< "$pair"
    echo "Syncing $src to $backupdir/$dest (root permissions required)"
    sudo btrfs-sync --verbose --delete "$src" "$backupdir/$dest"
done
================================================
FILE: batmanpager
================================================
#!/bin/sh
# Man-page pager wrapper: render input through bat's "man" syntax highlighting.
# mandoc passes a file name, other tools write to stdout
# using `cat "$@"` we take care of both reading from file and stdin
# https://github.com/sharkdp/bat/issues/1145#issuecomment-1743518097
# col -bx removes backspace overstriking and converts tabs to spaces before
# handing the text to bat, which pages via $PAGER.
exec cat "$@" | col -bx | bat --language man --style plain --pager "$PAGER"
================================================
FILE: bsnap.sh
================================================
#! /usr/bin/bash
# exit on first error
set -e
backupdir="$HOME/_backup_snapshots"
# Print an optional error message followed by the help text, all on stderr.
# Quoting "$@" preserves the caller's whitespace and prevents glob expansion
# (the original unquoted `echo $@` collapsed runs of spaces and expanded `*`).
usage() {
echo "$@" >&2
echo "Usage: $0 {snapshot|transfer} ...
snapshot Create snapshots for every subvolume configured in '\$backupdir/*'.
The subvolume is specified by a symlink '\$backupdir/*/cur'
pointing to a Btrfs subvolume.
transfer <dst> Transfer all snapshots from '\$backupdir/*/' to '<dst>/', which
should be other Btrfs partition. The tree structure is kept
intact.
\$backupdir is set to '$backupdir'
" >&2
}
# Send every snapshot found directly under $1 to $2 via btrfs send/receive,
# using the previous snapshot in sorted order as the parent for incremental
# transfers. Snapshots already present at the destination are skipped.
transfer() {
src="$1" # e.g. ~/_backup_snapshots/Bbox/
dst="$2" # e.g. /media/WD1T/backup-lahwaacz/Bbox/
[[ ! -d "$dst" ]] && mkdir "$dst"
# get list of snapshots to transfer
# NOTE(review): relies on word splitting of find output - breaks on paths
# containing whitespace; sorted order doubles as chronological order here
# because snapshot names are date-formatted.
src_snapshots=($(find "$src" -mindepth 1 -maxdepth 1 -type d | sort))
_len=${#src_snapshots[@]}
for ((i=0; i<$_len; i++)); do
if [[ -e "$dst/$(basename ${src_snapshots[$i]})" ]]; then
# nothing to transfer
echo "Snapshot '$dst/$(basename ${src_snapshots[$i]})' already exists"
continue
fi
# There is currently an issue that the snapshots to be used with "btrfs send"
# must be physically on the disk, or you may receive a "stale NFS file handle"
# error. This is accomplished by "sync" after the snapshot
#
# ref: http://marc.merlins.org/perso/btrfs/post_2014-03-22_Btrfs-Tips_-Doing-Fast-Incremental-Backups-With-Btrfs-Send-and-Receive.html
sync
# NOTE(review): dst_snapshots is assigned but never used below
dst_snapshots=($(find "$dst" -mindepth 1 -maxdepth 1 -type d | sort))
if [[ $i -eq 0 ]]; then
# no parent, make initial transfer
sudo sh -c "btrfs send ${src_snapshots[$i]} | btrfs receive $dst"
else
# incremental transfer: the previous snapshot acts as the parent (-p)
sudo sh -c "btrfs send -p ${src_snapshots[(($i-1))]} ${src_snapshots[$i]} | btrfs receive $dst"
fi
done
}
# Dispatch on the first command-line argument.
case $1 in
snapshot)
# Create a read-only snapshot for every configured subvolume; each
# $backupdir/<name>/cur symlink points at the subvolume to snapshot.
for dir in "$backupdir"/*; do
if [[ -L "$dir/cur" ]]; then
# snapshot named after the current date and time, e.g. 2024-01-01-12:00:00
btrfs subvolume snapshot -r $(realpath "$dir/cur") "$dir/$(date +%F-%T)"
else
echo "$dir/cur does not exist or is not a symlink"
fi
done
;;
transfer)
# The destination must exist and be a directory (on another Btrfs fs).
[ -n "$2" -a -d "$2" ] || usage "Invalid destination path"
for dir in "$backupdir"/*; do
transfer "$dir" "$2"/$(basename "$dir")
done
;;
*)
usage "Incorrect invocation"
esac
================================================
FILE: btrfs-diff
================================================
#!/bin/bash
# Author: http://serverfault.com/users/96883/artfulrobot
# License: Unknown
#
# This script will show most files that got modified or added.
# Renames and deletions will not be shown.
# Read limitations on:
# http://serverfault.com/questions/399894/does-btrfs-have-an-efficient-way-to-compare-snapshots
#
# btrfs send is the best way to do this long term, but as of kernel
# 3.14, btrfs send cannot just send a list of changed files without
# scanning and sending all the changed data blocks along.

# Print an error message plus usage help on stderr and exit with failure.
usage() { echo "$@" >&2; echo "Usage: $0 <older-snapshot> <newer-snapshot>" >&2; exit 1; }

[ $# -eq 2 ] || usage "Incorrect invocation"
SNAPSHOT_OLD=$1
SNAPSHOT_NEW=$2
# quote the paths so snapshots with spaces do not break the tests
[ -d "$SNAPSHOT_OLD" ] || usage "$SNAPSHOT_OLD does not exist"
[ -d "$SNAPSHOT_NEW" ] || usage "$SNAPSHOT_NEW does not exist"

# Ask for files newer than a huge generation number; the output's trailing
# "transid marker was N" line reveals the old snapshot's current generation.
OLD_TRANSID=$(btrfs subvolume find-new "$SNAPSHOT_OLD" 9999999)
OLD_TRANSID=${OLD_TRANSID#transid marker was }
# The original reported $SNAPSHOT_NEW here although the generation lookup is
# performed on the *old* snapshot - fixed to name the right path.
# Two single-bracket tests joined by && replace the obsolescent -a operator.
[ -n "$OLD_TRANSID" ] && [ "$OLD_TRANSID" -gt 0 ] || usage "Failed to find generation for $SNAPSHOT_OLD"

# List files in the new snapshot changed since the old generation; drop the
# transid marker line (sed '$d') and keep only the path columns.
btrfs subvolume find-new "$SNAPSHOT_NEW" "$OLD_TRANSID" | sed '$d' | cut -f17- -d' ' | sort | uniq
================================================
FILE: btrfs-sync
================================================
#!/bin/bash
#
# Simple script that synchronizes BTRFS snapshots locally.
# Features compression, retention policy and automatic incremental sync
#
set -e
set -o pipefail
set -o errtrace
print_usage() {
# Emit the help text on stdout; the caller decides the exit status.
cat <<EOF
Usage:
$BIN [options] <src> [<src>...] <dir>
-k|--keep NUM keep only last <NUM> sync'ed snapshots
-d|--delete delete snapshots in <dst> that don't exist in <src>
-q|--quiet don't display progress
-v|--verbose display more information
-h|--help show usage
<src> can either be a single snapshot, or a folder containing snapshots

EOF
}
# Verbose-only echo: print the arguments only when VERBOSE is enabled.
echov() {
[[ "$VERBOSE" != 1 ]] && return 0
echo "$@"
}
#----------------------------------------------------------------------------------------------------------
# preliminary checks
BIN="${0##*/}"
[[ $# -lt 2 ]] && { print_usage ; exit 1; }
[[ ${EUID} -ne 0 ]] && { echo "Must be run as root. Try 'sudo $BIN'"; exit 1; }
# parse arguments
KEEP=0
# NOTE(review): -z, -Z and -p are declared for getopt but never handled below
OPTS=$( getopt -o hqzZk:p:dv -l quiet -l help -l keep: -l delete -l verbose -- "$@" 2>/dev/null )
[[ $? -ne 0 ]] && { echo "error parsing arguments"; exit 1; }
eval set -- "$OPTS"
while true; do
case "$1" in
-h|--help ) print_usage; exit 0 ;;
-q|--quiet ) QUIET=1 ; shift 1 ;;
-d|--delete ) DELETE=1 ; shift 1 ;;
-k|--keep ) KEEP=$2 ; shift 2 ;;
-v|--verbose) VERBOSE=1 ; shift 1 ;;
--) shift; break ;;
esac
done
# detect src and dst arguments
# all positional arguments except the last are sources; the last is the target
SRC=( "${@:1:$#-1}" )
DST="${@: -1}"
# NOTE(review): "$SRC" expands only the first array element, so this access
# check covers just the first source path
test -x "$SRC" &>/dev/null || {
echo "Access error. Do you have adequate permissions for $SRC?"
exit 1
}
test -x "$DST" &>/dev/null || {
echo "Access error. Do you have adequate permissions for $DST?"
exit 1
}
#----------------------------------------------------------------------------------------------------------
# more checks
## don't overlap
# NOTE(review): the pid file is never removed on exit; pgrep -F treats a
# stale pid as "not running" only when no process with that pid exists
if pgrep -F /run/btrfs-sync.pid &>/dev/null; then
echo "$BIN is already running"
exit 1
fi
echo $$ > /run/btrfs-sync.pid
## src checks
echov "* Check source"
SRCS=()
SRCS_BASE=()
for s in "${SRC[@]}"; do
src="$(realpath "$s")"
if ! test -e "$src"; then
echo "$s not found"
exit 1
fi
# check if the src is a read-only subvolume
if btrfs subvolume show "$src" &>/dev/null && [[ "$(btrfs property get -ts "$src")" == "ro=true" ]]; then
SRCS+=("$src")
SRCS_BASE+=("$src")
else
# otherwise scan up to two directory levels below for read-only subvolumes
for dir in $( find "$src" -maxdepth 2 -type d ); do
# check if the src is a read-only subvolume
if btrfs subvolume show "$dir" &>/dev/null && [[ "$(btrfs property get -ts "$dir")" == "ro=true" ]]; then
SRCS+=("$dir")
SRCS_BASE+=("$src")
fi
done
fi
done
if [[ ${#SRCS[@]} -eq 0 ]]; then
echo "no BTRFS subvolumes found"
exit 1
fi
## use 'pv' command if available
# PV is the progress-indicator stage of the send/receive pipeline;
# it degrades to plain `cat` when quiet or when pv is not installed
PV=( pv -F"time elapsed [%t] | rate %r | total size [%b]" )
if [[ "$QUIET" == "1" ]]; then
PV=( cat )
else
if ! type pv &>/dev/null; then
echo "INFO: install the 'pv' package in order to get a progress indicator"
PV=( cat )
fi
fi
#----------------------------------------------------------------------------------------------------------
# sync snapshots
# Collect received subvolumes below $1 into the global arrays DSTS (paths)
# and DST_UUIDS (their "Received UUID" values). Only subvolumes that were
# created by btrfs receive (i.e. have a Received UUID) are recorded.
get_dst_snapshots() { # sets DSTS DST_UUIDS
local DST="$1"
DSTS=()
DST_UUIDS=()
# scan up to two directory levels below the destination
for dir in $( find "$DST" -maxdepth 2 -type d ); do
if btrfs subvolume show "$dir" &>/dev/null; then
local UUID=$( btrfs subvolume show "$dir" 2>/dev/null | grep 'Received UUID' | awk '{ print $3 }' )
# "-" means the subvolume was not created via btrfs receive
if [[ "$UUID" != "-" ]] && [[ "$UUID" != "" ]]; then
DSTS+=("$dir")
DST_UUIDS+=("$UUID")
fi
fi
done
}
# Pick the best seed snapshot for an incremental send of $1 and store it in
# the global SEED. Prefers SEED_NEXT (the snapshot synced in the previous
# iteration); otherwise selects the most recent source snapshot that already
# exists at the destination, matching by UUID or Received UUID.
choose_seed() { # sets SEED
local SRC="$1"
local SRC_BASE="$2"
SEED="$SEED_NEXT"
if [[ "$SEED" == "" ]]; then
# try to get most recent src snapshot that exists in dst to use as a seed
local RXID_CALCULATED=0
declare -A PATH_RXID DATE_RXID SHOWP RXIDP DATEP
local LIST="$( btrfs subvolume list -su "$SRC" )"
local SEED_CANDIDATES=()
for id in "${DST_UUIDS[@]}"; do
# try to match by UUID
# NOTE(review): the awk field numbers ($11, $12, $14, $16) depend on the
# exact column layout of `btrfs subvolume list -su` - fragile across
# btrfs-progs versions
local PATH_=$( awk "{ if ( \$14 == \"$id\" ) print \$16 }" <<<"$LIST" )
local DATE=$( awk "{ if ( \$14 == \"$id\" ) print \$11, \$12 }" <<<"$LIST" )
# try to match by received UUID, only if necessary
if [[ "$PATH_" == "" ]]; then
if [[ "$RXID_CALCULATED" == "0" ]]; then # create table during the first iteration if needed
local PATHS=( $( btrfs subvolume list -u "$SRC" | awk '{ print $11 }' ) )
for p in "${PATHS[@]}"; do
SHOWP="$( btrfs subvolume show "$( dirname "$SRC" )/$( basename "$p" )" 2>/dev/null )"
RXIDP="$( grep 'Received UUID' <<<"$SHOWP" | awk '{ print $3 }' )"
DATEP="$( grep 'Creation time' <<<"$SHOWP" | awk '{ print $3, $4 }' )"
[[ "$RXIDP" == "" ]] && continue
PATH_RXID["$RXIDP"]="$p"
DATE_RXID["$RXIDP"]="$DATEP"
done
RXID_CALCULATED=1
fi
PATH_="${PATH_RXID["$id"]}"
DATE="${DATE_RXID["$id"]}"
fi
# skip unmatched candidates and the snapshot currently being synced
if [[ "$PATH_" == "" ]] || [[ "$PATH_" == "$( basename "$SRC" )" ]]; then
continue
fi
# if the path does not exist, it is likely relative to the root subvolume
# rather than the mounted subvolume
if ! test -d "$PATH_" && mountpoint -q "$SRC_BASE"; then
local SRC_BASE_SUBVOL=$(findmnt -n -o OPTIONS "$SRC_BASE" | tr "," "\n" | grep "subvol=" | awk -F '=' '{ print $2 }')
# drop the leading slash
SRC_BASE_SUBVOL="${SRC_BASE_SUBVOL#/}"
# replace the prefix in $PATH_
if [[ "$PATH_" =~ "$SRC_BASE_SUBVOL"* ]]; then
PATH_="${PATH_#${SRC_BASE_SUBVOL}}"
PATH_="$SRC_BASE/$PATH_"
fi
fi
# remember the candidate keyed by its creation time in epoch seconds
local SECS=$( date -d "$DATE" +"%s" )
SEED_CANDIDATES+=("$SECS|$PATH_")
done
# newest candidate wins (sort by the leading timestamp)
SEED=$(IFS=$'\n' echo "${SEED_CANDIDATES[@]}" | sort -V | tail -1 | cut -f2 -d'|')
fi
}
# Return 0 when the source snapshot already exists at the destination,
# matching its UUID or Received UUID against the DST_UUIDS array.
# NOTE(review): the argument passed at the call site is ignored - the
# function reads $SRC from the caller's scope (bash dynamic scoping)
# instead of using "$1".
exists_at_dst() {
local SHOW="$( btrfs subvolume show "$SRC" )"
local SRC_UUID="$( grep 'UUID:' <<< "$SHOW" | head -1 | awk '{ print $2 }' )"
grep -q "$SRC_UUID" <<<"${DST_UUIDS[@]}" && return 0;
local SRC_RXID="$( grep 'Received UUID' <<< "$SHOW" | awk '{ print $3 }' )"
# "-" means the source itself was never received from elsewhere
grep -q "^-$" <<<"$SRC_RXID" && return 1;
grep -q "$SRC_RXID" <<<"${DST_UUIDS[@]}" && return 0;
return 1
}
## sync incrementally
# Send one source snapshot to $DST, preferring an incremental transfer via
# the seed chosen by choose_seed. Updates DSTS/DST_UUIDS/SEED_NEXT on success.
sync_snapshot() {
local SRC="$1"
local SRC_BASE="$2"
if ! test -d "$SRC" || ! test -d "$SRC_BASE"; then
return
fi
# nothing to do when the snapshot is already present at the destination
if exists_at_dst "$SRC"; then
echov "* Skip existing '$SRC'"
return 0
fi
choose_seed "$SRC" "$SRC_BASE" # sets SEED
echo "SEED=$SEED"
# incremental sync argument
if [[ "$SEED" != "" ]]; then
if test -d "$SEED"; then
# Sends the difference between the new snapshot and old snapshot to the
# backup location. Using the -c flag instead of -p tells it that there
# is an identical subvolume to the old snapshot at the receiving
# location where it can get its data. This helps speed up the transfer.
local SEED_ARG=( -c "$SEED" )
else
echo "INFO: couldn't find $SEED. Non-incremental mode"
fi
fi
# destination path where the subvolume will be sent
local DST_SUBVOL="$DST/$( realpath --relative-to "$SRC_BASE" "$SRC" )"
if test -d "$DST_SUBVOL"; then
echo "ERROR: destination directory $DST_SUBVOL already exists, but was not detected as a Btrfs subvolume." >&2
return 1
fi
# create the parent directory at destination
mkdir -p "$(dirname "$DST_SUBVOL")"
# print info
echo -n "* Synchronizing '$SRC' to '$DST_SUBVOL'"
if [[ "$SEED" != "" ]]; then
echov -n " using seed '$SEED'"
fi
echo "..."
# do it
# on pipeline failure the partially received subvolume is deleted
btrfs send -q "${SEED_ARG[@]}" "$SRC" \
| "${PV[@]}" \
| btrfs receive "$(dirname "$DST_SUBVOL")" 2>&1 \
| (grep -v -e'^At subvol ' -e'^At snapshot ' || true) \
|| {
btrfs subvolume delete "$DST_SUBVOL" 2>/dev/null
return 1;
}
# update DST list
DSTS+=("$DST_SUBVOL")
# NOTE(review): SRC_UUID is never set in this function (it is local to
# exists_at_dst), so an empty string is appended here - likely a bug.
DST_UUIDS+=("$SRC_UUID")
SEED_NEXT="$SRC"
}
#----------------------------------------------------------------------------------------------------------
# sync all snapshots found in src
echov "* Check destination"
get_dst_snapshots "$DST" # sets DSTS DST_UUIDS
for (( i=0; i<"${#SRCS[@]}"; i++ )); do
src="${SRCS[$i]}"
src_base="${SRCS_BASE[$i]}"
sync_snapshot "$src" "$src_base" && RET=0 || RET=1
# retry loop kept for reference; currently a single failure aborts the run
# for i in 1 2; do
# [[ "$RET" != "1" ]] && break
# echo "* Retrying '$src'..."
# sync_snapshot "$src" && RET=0 || RET=1
# done
if [[ "$RET" == "1" ]]; then
echo "Abort"
exit 1
fi
done
#----------------------------------------------------------------------------------------------------------
# retention policy
# delete the oldest destination snapshots beyond the --keep limit
if [[ "$KEEP" != 0 ]] && [[ ${#DSTS[@]} -gt $KEEP ]]; then
echo "* Pruning old snapshots..."
for (( i=0; i < $(( ${#DSTS[@]} - KEEP )); i++ )); do
PRUNE_LIST+=( "${DSTS[$i]}" )
done
btrfs subvolume delete "${PRUNE_LIST[@]}"
fi
# delete flag
# with --delete, remove destination snapshots whose basename no longer
# matches any source snapshot
if [[ "$DELETE" == 1 ]]; then
for dst in "${DSTS[@]}"; do
FOUND=0
# for src in "${SRCS[@]}"; do
for (( i=0; i<"${#SRCS[@]}"; i++ )); do
src="${SRCS[$i]}"
echo "checking $src"
if [[ "$( basename $src )" == "$( basename $dst )" ]]; then
FOUND=1
break
fi
done
if [[ "$FOUND" == 0 ]]; then
DEL_LIST+=( "$dst" )
fi
done
if [[ "$DEL_LIST" != "" ]]; then
echo "* Deleting non existent snapshots..."
btrfs subvolume delete "${DEL_LIST[@]}"
fi
fi
================================================
FILE: btrfs-sync-WIP
================================================
#!/bin/bash
set -o errtrace
version="0.0"
name="btrfs-sync"
SNAPPER_CONFIG=/etc/conf.d/snapper
TMPDIR=$(mktemp -d)
PIPE=$TMPDIR/$name.out
mkfifo $PIPE
systemd-cat -t "$name" < $PIPE &
exec 3>$PIPE
donotify=0
which notify-send &> /dev/null
if [[ $? -ne 0 ]]; then
donotify=1
fi
# Print an error line (redirected to stderr for the whole function body)
# and raise a desktop notification.
error() {
printf "==> ERROR: %s\n" "$@"
notify_error 'Error' 'Check journal for more information.'
} >&2
# Report the error, then terminate the script with failure.
die() {
error "$@"
exit 1
}
# ERR trap handler: dump context about the failing command and exit.
# Arguments: line number, exit status, command, bash line, function name.
traperror() {
printf "Exited due to error on line %s.\n" $1
printf "exit status: %s\n" "$2"
printf "command: %s\n" "$3"
printf "bash line: %s\n" "$4"
printf "function name: %s\n" "$5"
exit 1
}
# SIGTERM/SIGINT handler.
trapkill() {
die "Exited due to user intervention."
}
trap 'traperror ${LINENO} $? "$BASH_COMMAND" $BASH_LINENO "${FUNCNAME[@]}"' ERR
trap trapkill SIGTERM SIGINT
# Print the command-line help text for this script on stdout.
usage() {
cat <<EOF
$name $version
Usage: $name [options]
Options:
-c, --config <config> snapper configuration to backup
-d, --description <desc> snapper description
-h, --help print this message
-n, --noconfirm do not ask for confirmation
-q, --quiet do not send notifications; instead print them.
-s, --subvolid <subvlid> subvolume id of the mounted BTRFS subvolume to back up to
-u, --UUID <UUID> UUID of the mounted BTRFS subvolume to back up to
See 'man snap-sync' for more details.
EOF
}
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-d|--description)
description="$2"
shift 2
;;
-c|--config)
selected_configs="$2"
shift 2
;;
-u|--UUID)
uuid_cmdline="$2"
shift 2
;;
-s|--subvolid)
subvolid_cmdline="$2"
shift 2
;;
-n|--noconfirm)
noconfirm="yes"
shift
;;
-h|--help)
usage
exit 1
;;
-q|--quiet)
donotify=1
shift
;;
*)
die "Unknown option: '$key'. Run '$name -h' for valid options."
;;
esac
done
# Send a desktop notification to every logged-in user's session bus.
# $1 = title, $2 = body, $3 = icon suffix (information|error).
notify() {
    for u in $(users | tr ' ' '\n' | sort -u); do
        sudo -u "$u" DISPLAY=:0 \
            DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/$(sudo -u "$u" id -u)/bus \
            notify-send -a "$name" "$1" "$2" --icon="dialog-$3"
    done
}

# Informational message: desktop notification when enabled ($donotify == 0),
# plain stdout line otherwise.
notify_info() {
    if [[ $donotify -eq 0 ]]; then
        notify "$1" "$2" "information"
    else
        # A literal format string keeps '%' in messages from being
        # interpreted by printf (the original passed "$1: $2" as the
        # format, which broke on percent signs).
        printf '%s: %s\n' "$1" "$2"
    fi
}

# Error variant of notify_info.
notify_error() {
    if [[ $donotify -eq 0 ]]; then
        notify "$1" "$2" "error"
    else
        printf '%s: %s\n' "$1" "$2"
    fi
}
[[ $EUID -ne 0 ]] && die "Script must be run as root. See '$name -h' for a description of options"
! [[ -f $SNAPPER_CONFIG ]] && die "$SNAPPER_CONFIG does not exist."
description=${description:-"latest incremental backup"}
uuid_cmdline=${uuid_cmdline:-"none"}
subvolid_cmdline=${subvolid_cmdline:-"5"}
noconfirm=${noconfirm:-"no"}
if [[ "$uuid_cmdline" != "none" ]]; then
notify_info "Backup started" "Starting backups to $uuid_cmdline subvolid=$subvolid_cmdline..."
else
notify_info "Backup started" "Starting backups. Use command line menu to select disk."
fi
if [[ "$(findmnt -n -v --target / -o FSTYPE)" == "btrfs" ]]; then
EXCLUDE_UUID=$(findmnt -n -v -t btrfs --target / -o UUID)
TARGETS=$(findmnt -n -v -t btrfs -o UUID,TARGET --list | grep -v $EXCLUDE_UUID | awk '{print $2}')
UUIDS=$(findmnt -n -v -t btrfs -o UUID,TARGET --list | grep -v $EXCLUDE_UUID | awk '{print $1}')
else
TARGETS=$(findmnt -n -v -t btrfs -o TARGET --list)
UUIDS=$(findmnt -n -v -t btrfs -o UUID --list)
fi
declare -a TARGETS_ARRAY
declare -a UUIDS_ARRAY
declare -a SUBVOLIDS_ARRAY
i=0
for x in $TARGETS; do
SUBVOLIDS_ARRAY[$i]=$(btrfs subvolume show $x | awk '/Subvolume ID:/ { print $3 }')
TARGETS_ARRAY[$i]=$x
i=$((i+1))
done
i=0
disk=-1
disk_count=0
for x in $UUIDS; do
UUIDS_ARRAY[$i]=$x
if [[ "$x" == "$uuid_cmdline" && ${SUBVOLIDS_ARRAY[$((i))]} == "$subvolid_cmdline" ]]; then
disk=$i
disk_count=$(($disk_count+1))
fi
i=$((i+1))
done
if [[ "${#UUIDS_ARRAY[$@]}" -eq 0 ]]; then
die "No external btrfs subvolumes found to backup to. Run '$name -h' for more options."
fi
if [[ "$disk_count" > 1 ]]; then
printf "Multiple mount points were found with UUID %s and subvolid %s.\n" "$uuid_cmdline" "$subvolid_cmdline"
disk="-1"
fi
if [[ "$disk" == -1 ]]; then
if [[ "$disk_count" == 0 && "$uuid_cmdline" != "none" ]]; then
error "A device with UUID $uuid_cmdline and subvolid $subvolid_cmdline was not found to be mounted, or it is not a BTRFS device."
fi
printf "Select a mounted BTRFS device on your local machine to backup to.\nFor more options, exit and run '$name -h'.\n"
while [[ $disk -lt 0 || $disk -gt $i ]]; do
for x in "${!TARGETS_ARRAY[@]}"; do
printf "%4s) %s (uuid=%s, subvolid=%s)\n" "$((x+1))" "${TARGETS_ARRAY[$x]}" "${UUIDS_ARRAY[$x]}" "${SUBVOLIDS_ARRAY[$x]}"
done
printf "%4s) Exit\n" "0"
read -e -r -p "Enter a number: " disk
if ! [[ $disk == ?(-)+([0-9]) ]] || [[ $disk -lt 0 || $disk -gt $i ]]; then
printf "\nNo disk selected. Select a disk to continue.\n"
disk=-1
fi
done
if [[ $disk == 0 ]]; then
exit 0
fi
disk=$(($disk-1))
fi
selected_subvolid="${SUBVOLIDS_ARRAY[$((disk))]}"
selected_uuid="${UUIDS_ARRAY[$((disk))]}"
selected_mnt="${TARGETS_ARRAY[$((disk))]}"
printf "\nYou selected the disk with uuid=%s, subvolid=%s.\n" "$selected_uuid" "$selected_subvolid" | tee $PIPE
printf "The disk is mounted at '%s'.\n" "$selected_mnt" | tee $PIPE
source $SNAPPER_CONFIG
if [[ -z $selected_configs ]]; then
printf "\nInteractively cycling through all snapper configurations...\n"
fi
selected_configs=${selected_configs:-$SNAPPER_CONFIGS}
declare -a BACKUPDIRS_ARRAY
declare -a MYBACKUPDIR_ARRAY
declare -a OLD_NUM_ARRAY
declare -a OLD_SNAP_ARRAY
declare -a NEW_NUM_ARRAY
declare -a NEW_SNAP_ARRAY
declare -a NEW_INFO_ARRAY
declare -a BACKUPLOC_ARRAY
declare -a CONT_BACKUP_ARRAY
# Initial configuration of where backup directories are
i=0
for x in $selected_configs; do
if [[ "$(snapper -c $x list -t single | awk '/'"subvolid=$selected_subvolid, uuid=$selected_uuid"'/ {cnt++} END {print cnt}')" -gt 1 ]]; then
error "More than one snapper entry found with UUID $selected_uuid subvolid $selected_subvolid for configuration $x. Skipping configuration $x."
continue
fi
if [[ "$(snapper -c $x list -t single | awk '/'$name' backup in progress/ {cnt++} END {print cnt}')" -gt 0 ]]; then
printf "\nNOTE: Previous failed %s backup snapshots found for '%s'.\n" "$name" "$x" | tee $PIPE
if [[ $noconfirm == "yes" ]]; then
printf "'noconfirm' option passed. Failed backups will not be deleted.\n" | tee $PIPE
else
read -e -r -p "Delete failed backup snapshot(s)? (These local snapshots from failed backups are not used.) [y/N]? " delete_failed
while [[ -n "$delete_failed" && "$delete_failed" != [Yy]"es" &&
"$delete_failed" != [Yy] && "$delete_failed" != [Nn]"o" &&
"$delete_failed" != [Nn] ]]; do
read -e -r -p "Delete failed backup snapshot(s)? (These local snapshots from failed backups are not used.) [y/N] " delete_failed
if [[ -n "$delete_failed" && "$delete_failed" != [Yy]"es" &&
"$delete_failed" != [Yy] && "$delete_failed" != [Nn]"o" &&
"$delete_failed" != [Nn] ]]; then
printf "Select 'y' or 'N'.\n"
fi
done
if [[ "$delete_failed" == [Yy]"es" || "$delete_failed" == [Yy] ]]; then
snapper -c $x delete $(snapper -c $x list | awk '/'$name' backup in progress/ {print $1}')
fi
fi
fi
SNAP_SYNC_EXCLUDE=no
if [[ -f "/etc/snapper/configs/$x" ]]; then
source /etc/snapper/configs/$x
# TODO: snapper -c "$x" --jsonout get-config
else
die "Selected snapper configuration $x does not exist."
fi
if [[ $SNAP_SYNC_EXCLUDE == "yes" ]]; then
continue
fi
printf "\n"
old_num=$(snapper -c "$x" list -t single | awk '/'"subvolid=$selected_subvolid, uuid=$selected_uuid"'/ {print $1}')
old_snap=$SUBVOLUME/.snapshots/$old_num/snapshot
OLD_NUM_ARRAY[$i]=$old_num
OLD_SNAP_ARRAY[$i]=$old_snap
if [[ -z "$old_num" ]]; then
printf "No backups have been performed for '%s' on this disk.\n" "$x"
read -e -r -p "Enter name of subvolume to store backups, relative to $selected_mnt (to be created if not existing): " mybackupdir
printf "This will be the initial backup for snapper configuration '%s' to this disk. This could take awhile.\n" "$x"
BACKUPDIR="$selected_mnt/$mybackupdir"
test -d "$BACKUPDIR" || btrfs subvolume create "$BACKUPDIR"
else
mybackupdir=$(snapper -c "$x" list -t single | awk -F"|" '/'"subvolid=$selected_subvolid, uuid=$selected_uuid"'/ {print $5}' | awk -F "," '/backupdir/ {print $1}' | awk -F"=" '{print $2}')
BACKUPDIR="$selected_mnt/$mybackupdir"
test -d $BACKUPDIR || die "%s is not a directory on %s.\n" "$BACKUPDIR" "$selected_uuid"
fi
BACKUPDIRS_ARRAY[$i]="$BACKUPDIR"
MYBACKUPDIR_ARRAY[$i]="$mybackupdir"
printf "Creating new local snapshot for '%s' configuration...\n" "$x" | tee $PIPE
# new_num=$(snapper -c "$x" create --print-number -d "$name backup in progress")
new_num=TODO
new_snap=$SUBVOLUME/.snapshots/$new_num/snapshot
new_info=$SUBVOLUME/.snapshots/$new_num/info.xml
sync
backup_location=$BACKUPDIR/$x/$new_num/
printf "Will backup %s to %s\n" "$new_snap" "$backup_location/snapshot" | tee $PIPE
if (test -d "$backup_location/snapshot") ; then
printf "WARNING: Backup directory '%s' already exists. This configuration will be skipped!\n" "$backup_location/snapshot" | tee $PIPE
printf "Move or delete destination directory and try backup again.\n" | tee $PIPE
fi
NEW_NUM_ARRAY[$i]="$new_num"
NEW_SNAP_ARRAY[$i]="$new_snap"
NEW_INFO_ARRAY[$i]="$new_info"
BACKUPLOC_ARRAY[$i]="$backup_location"
cont_backup="K"
CONT_BACKUP_ARRAY[$i]="yes"
if [[ $noconfirm == "yes" ]]; then
cont_backup="yes"
else
while [[ -n "$cont_backup" && "$cont_backup" != [Yy]"es" &&
"$cont_backup" != [Yy] && "$cont_backup" != [Nn]"o" &&
"$cont_backup" != [Nn] ]]; do
read -e -r -p "Proceed with backup of '$x' configuration [Y/n]? " cont_backup
if [[ -n "$cont_backup" && "$cont_backup" != [Yy]"es" &&
"$cont_backup" != [Yy] && "$cont_backup" != [Nn]"o" &&
"$cont_backup" != [Nn] ]]; then
printf "Select 'Y' or 'n'.\n"
fi
done
fi
if [[ "$cont_backup" != [Yy]"es" && "$cont_backup" != [Yy] && -n "$cont_backup" ]]; then
CONT_BACKUP_ARRAY[$i]="no"
printf "Not backing up '%s' configuration.\n" $x
# snapper -c $x delete $new_num
fi
i=$(($i+1))
done
# Actual backing up
printf "\nPerforming backups...\n" | tee $PIPE
i=-1
for x in $selected_configs; do
i=$(($i+1))
SNAP_SYNC_EXCLUDE=no
if [[ -f "/etc/snapper/configs/$x" ]]; then
source /etc/snapper/configs/$x
else
die "Selected snapper configuration $x does not exist."
fi
cont_backup=${CONT_BACKUP_ARRAY[$i]}
if [[ $cont_backup == "no" || $SNAP_SYNC_EXCLUDE == "yes" ]]; then
notify_info "Backup in progress" "NOTE: Skipping $x configuration."
continue
fi
notify_info "Backup in progress" "Backing up $x configuration."
printf "\n"
old_num="${OLD_NUM_ARRAY[$i]}"
old_snap="${OLD_SNAP_ARRAY[$i]}"
BACKUPDIR="${BACKUPDIRS_ARRAY[$i]}"
mybackupdir="${MYBACKUPDIR_ARRAY[$i]}"
new_num="${NEW_NUM_ARRAY[$i]}"
new_snap="${NEW_SNAP_ARRAY[$i]}"
new_info="${NEW_INFO_ARRAY[$i]}"
backup_location="${BACKUPLOC_ARRAY[$i]}"
if (test -d "$backup_location/snapshot") ; then
printf "ERROR: Backup directory '%s' already exists. Skipping backup of this configuration!\n" "$backup_location/snapshot" | tee $PIPE
continue
fi
mkdir -p $backup_location
if [[ -z "$old_num" ]]; then
printf "Sending first snapshot for '%s' configuration...\n" "$x" | tee $PIPE
# btrfs send "$new_snap" | btrfs receive "$backup_location" &>/dev/null
else
printf "Sending incremental snapshot for '%s' configuration...\n" "$x" | tee $PIPE
# Sends the difference between the new snapshot and old snapshot to the
# backup location. Using the -c flag instead of -p tells it that there
# is an identical subvolume to the old snapshot at the receiving
# location where it can get its data. This helps speed up the transfer.
# btrfs send -c "$old_snap" "$new_snap" | btrfs receive "$backup_location"
# printf "Modifying data for old local snapshot for '%s' configuration...\n" "$x" | tee $PIPE
# snapper -v -c "$x" modify -d "old snap-sync snapshot (you may remove)" -u "backupdir=,subvolid=,uuid=" -c "number" "$old_num"
fi
cp "$new_info" "$backup_location"
# It's important not to change this userdata in the snapshots, since that's how
# we find the previous one.
# userdata="backupdir=$mybackupdir, subvolid=$selected_subvolid, uuid=$selected_uuid"
# Tag new snapshot as the latest
# printf "Tagging local snapshot as latest backup for '%s' configuration...\n" "$x" | tee $PIPE
# snapper -v -c "$x" modify -d "$description" -u "$userdata" "$new_num"
printf "Backup complete for '%s' configuration.\n" "$x" > $PIPE
done
printf "\nDone!\n" | tee $PIPE
exec 3>&-
if [[ "$uuid_cmdline" != "none" ]]; then
notify_info "Finished" "Backups to $uuid_cmdline complete!"
else
notify_info "Finished" "Backups complete!"
fi
================================================
FILE: clean-aur-dir.py
================================================
#! /usr/bin/env python
import os
import sys
import re
import subprocess
# Matches an Arch Linux package file name and captures pkgname/pkgver/arch.
# A raw string avoids invalid-escape warnings for "\." and "\+" (these became
# SyntaxWarning in newer Python versions).
pkgname_regex = re.compile(r"^(?P<pkgname>[a-z0-9@._+-]+)-(?P<pkgver>[a-z0-9._:-]+)-(?P<arch>any|x86_64|i686)\.pkg\.tar(\.xz)?(\.sig)?$", re.IGNORECASE)
def usage():
    """Print a short description plus the usage line, then exit with status 1."""
    help_lines = (
        "Simple utility to clean directories from old Arch's package files, keeping only those currently installed",
        "usage: %s PATH" % sys.argv[0],
    )
    for line in help_lines:
        print(line)
    sys.exit(1)
if __name__ == "__main__":
    # exactly one argument is expected: the directory to clean
    if len(sys.argv) != 2:
        usage()
    path = sys.argv[1]
    if not os.path.isdir(path):
        usage()
    os.chdir(path)
    files = {}
    # remove files that don't match pkgname_reges from further processing!!
    for f in os.listdir():
        if not os.path.isfile(f):
            continue
        match = re.match(pkgname_regex, f)
        if match:
            # strip extension for future comparison with expac's output
            files[f] = "{pkgname}-{pkgver}-{arch}".format(**match.groupdict())
    # get list of installed packages
    # (shell=True is safe here: the command string is a fixed literal)
    installed = subprocess.check_output("expac -Qs '%n-%v-%a'", shell=True, universal_newlines=True).splitlines()
    for f in sorted(files):
        # compare with the key instead of the whole filename
        # (drops file extensions like .pkg.tar.{xz,gz}{,.sig} )
        ff = files[f]
        if ff in installed:
            print("Kept: %s" % f)
        else:
            print("Deleted: %s" % f)
            os.remove(f)
================================================
FILE: convertToUtf8.py
================================================
#! /usr/bin/env python
import sys
import os
import traceback
CHARSETS = ("ascii", "cp1250", "cp1252", "iso-8859-9", "iso-8859-15")
def is_utf8(filepath):
    """Return True when the file at *filepath* decodes cleanly as UTF-8.

    Returns False when the file cannot be read or contains byte sequences
    that are not valid UTF-8.
    """
    try:
        # The context manager guarantees the handle is closed even when
        # decoding fails (the original leaked the file object on error).
        with open(filepath, "rb") as f:
            f.read().decode("utf-8")
        return True
    except (OSError, UnicodeDecodeError):
        # Narrow except clauses: a bare except would also swallow
        # KeyboardInterrupt and SystemExit.
        return False
def to_utf8(path):
    """Rewrite the file at *path* as UTF-8, trying each charset in CHARSETS.

    The first charset that decodes the file successfully wins; the file is
    rewritten in place and a status message is returned. When no charset
    fits (or the file cannot be read), a failure message is returned.
    """
    for charset in CHARSETS:
        try:
            # context managers close the handles even on error
            # (the original left files open when an exception was raised)
            with open(path, "rb") as f:
                content = f.read().decode(charset)
            with open(path, "wb") as f:
                f.write(content.encode("utf-8"))
            return "Converting to utf-8: " + os.path.split(path)[1]
        except (OSError, UnicodeError, LookupError):
            # this charset did not fit (or an I/O problem occurred);
            # fall through to the next candidate
            pass
    return "Unable to open " + os.path.split(path)[1] + " - unknown charset or binary file."
def run():
    """Check each file given on the command line; convert non-UTF-8 ones.

    Returns a newline-joined status report with surrounding whitespace
    stripped (empty string when nothing was processed).
    """
    reports = []
    for filename in sys.argv[1:]:
        # silently skip arguments that are not regular files
        if not os.path.isfile(filename):
            continue
        if is_utf8(filename):
            reports.append(os.path.split(filename)[1] + " is already in utf-8.\n")
        else:
            reports.append(to_utf8(filename) + "\n")
    return "".join(reports).strip()
if __name__ == "__main__":
    # at least one file argument is required
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " file1 [file2 ...]")
        sys.exit(1)
    try:
        message = run()
    except:
        # report the full traceback instead of crashing
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt
        message = traceback.format_exc()
    if message != "":
        print(message)
================================================
FILE: fatcp
================================================
#! /usr/bin/bash
# Script for safe copying to FAT32 filesystems.
# All bad characters are replaced by '_' (underscore) when copying.
# File conflicts (e.g. 'foo?' and 'foo:' are both mapped to 'foo_') are not checked - using 'cp -i' is recommended.
# Some resources:
# http://askubuntu.com/questions/11634/how-can-i-substitute-colons-when-i-rsync-on-a-usb-key
#
# Simple (stupid) alternative:
# find -type f -name '*.pat' -print0 | tar -c -f - --null --files-from - | tar -C /path/to/dst -v -x -f - --show-transformed --transform 's/?/_/g'
#
# two arguments are accepted
# exactly two arguments are accepted: source path and destination directory
if [[ $# -ne 2 ]]; then
echo "Usage: $0 <src path> <dst path>"
exit 1
fi
base=$(realpath "$1")
basedir=$(dirname "$base")
dst=$(realpath "$2")
# $dst must be existing dir
if [[ ! -d "$dst" ]]; then
echo "Target directory '$dst' does not exist."
exit 1
fi
# 'cp' alias
# (expanded unquoted later, so word splitting into command + flags is intentional)
CP="cp -i --preserve=all"
# characters that will be replaced with '_'
BADCHARS='<>|;:!?"*\+'
# enhance globbing
# dotglob: '*' also matches hidden files; globstar: enables recursive '**'
shopt -s dotglob globstar
# function creating target file/dir name
# Strip $basedir from the given path and graft the remainder onto $dst,
# replacing every FAT32-unsafe character ($BADCHARS) with an underscore.
mk_target() {
local stripped=${1#"$basedir"}
echo "$dst/${stripped//[$BADCHARS]/_}"
}
# dirs and files are handled differently
if [[ -d "$base" ]]; then
    # recreate the sanitized directory tree, then copy each regular file
    target=$(mk_target "$base")
    mkdir "$target"
    for src in "$base"/**/*; do
        target=$(mk_target "$src")
        if [[ -d "$src" ]]; then
            mkdir -p -- "$target"
        elif [[ "$src" != "$target" ]]; then
            $CP -- "$src" "$target"
        fi
    done
elif [[ -f "$base" ]]; then
    target=$(mk_target "$base")
    # BUG FIX: the original compared "$src" here - a variable that is never
    # set in this branch (leftover from the directory loop) - so the guard
    # always passed vacuously. Compare the actual source path instead.
    if [[ "$base" != "$target" ]]; then
        $CP -- "$base" "$target"
    fi
fi
================================================
FILE: ffparser.py
================================================
#! /usr/bin/env python

import argparse

from pythonscripts.ffparser import FFprobeParser

if __name__ == "__main__":
    # CLI wrapper around FFprobeParser: pick one section (audio/video/format)
    # and either pretty-print it all or extract a single attribute.
    cli = argparse.ArgumentParser(description="parse ffprobe's json output")
    section = cli.add_mutually_exclusive_group(required=True)
    section.add_argument("-a", "--audio", action="store_const", const="audio", dest="option", help="get audio attribute")
    section.add_argument("-v", "--video", action="store_const", const="video", dest="option", help="get video attribute")
    section.add_argument("-f", "--format", action="store_const", const="format", dest="option", help="get format attribute")
    mode = cli.add_mutually_exclusive_group(required=True)
    mode.add_argument("-g", "--get", action="store", nargs=1, dest="attribute", help="attribute name to get")
    mode.add_argument("-p", "--print", action="store_true", dest="pprint", help="print all attributes and exit")
    cli.add_argument("path", action="store", nargs=1, help="path to file to parse")
    opts = cli.parse_args()

    probe = FFprobeParser(opts.path[0])
    if opts.pprint:
        probe.pprint(opts.option)
    else:
        print(probe.get(opts.option, opts.attribute[0]))
================================================
FILE: fmount.py
================================================
#! /usr/bin/env python3
import argparse
import configparser
import os
import subprocess
import sys
from pathlib import Path
CONFIG = Path.home() / ".config" / "fmount.conf"
DEFAULT_MOUNTPATH = Path.home() / "mnt"
# we just strip spaces in the mntopts string
def reformat_mntopts(mntopts):
    """Normalize a comma-separated mount-option string.

    Strips whitespace around option names and values and removes exact
    duplicates. Order of the returned options is unspecified (set-based).
    """
    normalized = {
        "=".join(token.strip() for token in option.split("="))
        for option in mntopts.split(",")
    }
    return ",".join(normalized)
def mount_gio(*, scheme: str, host: str, path: str, user: str, port: str, mountpoint: Path):
    """Mount a remote location via `gio mount` (gvfs) and symlink the new
    gvfs mount directory to *mountpoint*.

    Returns early (printing to stderr) when the mountpoint exists and is not
    a symlink, or when the gvfs directory is missing after mounting.
    """
    if mountpoint.exists() and not mountpoint.is_symlink():
        print(f"Error: path {mountpoint} exists but is not a symlink", file=sys.stderr)
        return
    # build the gio location URL: scheme://[user@]host[:port]/path
    location = f"{scheme}://"
    if user:
        location += user + "@"
    location += host
    if port:
        location += ":" + port
    location += "/" + path
    # get path to the gvfs directory
    XDG_RUNTIME_DIR = os.environ.get("XDG_RUNTIME_DIR")
    if XDG_RUNTIME_DIR is None:
        XDG_RUNTIME_DIR = f"/run/user/{os.getuid()}"
    gvfs = Path(XDG_RUNTIME_DIR) / "gvfs"
    # save current gvfs mounts so the new one can be identified by set difference
    if gvfs.is_dir():
        mounts_before = set(gvfs.glob(f"{scheme}-share:*"))
    else:
        mounts_before = set()
    print(f"Mounting {location}")
    cmd = ["gio", "mount", location]
    subprocess.run(cmd, check=True)
    if not gvfs.is_dir():
        print(f"Error: gvfs directory {gvfs} does not exist", file=sys.stderr)
        return
    # detect the new gvfs mount and symlink it to mountpoint
    # NOTE(review): assumes exactly one new mount appeared; an empty
    # difference would raise IndexError here.
    mounts_after = set(gvfs.glob(f"{scheme}-share:*"))
    target = list(mounts_after - mounts_before)[0]
    # hack for inaccessible parents of the path on smb servers
    if scheme == "smb":
        _path = Path(path.lstrip("/"))
        # the first part is the remote share, the rest is the location we want
        target /= _path.relative_to(_path.parts[0])
    # create a symlink from mountpoint to gvfs target
    mountpoint.symlink_to(target)
def mount_sshfs(*, host: str, path: str, user: str, port: str, mountpoint: Path, mntopts: str):
    """Mount a remote directory at *mountpoint* using sshfs.

    Builds the [user@]host:path spec, appends -o/-p options when given,
    creates the mountpoint directory and runs sshfs (raises
    CalledProcessError on failure via check=True).
    """
    uhd = host + ":" + path
    if user:
        uhd = user + "@" + uhd
    cmd = ["sshfs", uhd, str(mountpoint)]
    if mntopts:
        cmd += ["-o", mntopts]
    if port:
        cmd += ["-p", port]
    print(f"Mounting at '{mountpoint}'...")
    # the mountpoint might exist after an error or automatic unmount
    mountpoint.mkdir(parents=True, exist_ok=True)
    subprocess.run(cmd, check=True)
def mount(name, mountpath: Path, config):
    """Mount the remote *name* below *mountpath*.

    Per-host settings come from the config file (DEFAULT section provides
    fallbacks); dispatches to sshfs or gio depending on the scheme.
    """
    target = mountpath / name
    scheme = config.get(name, "scheme", fallback="sshfs")
    opts = reformat_mntopts(config.get(name, "mntopts", fallback=""))
    common = dict(
        host=config.get(name, "host", fallback=name),
        path=config.get(name, "path", fallback=""),
        user=config.get(name, "user", fallback=None),
        port=config.get(name, "port", fallback=None),
        mountpoint=target,
    )
    if scheme == "sshfs":
        # sshfs is *much* faster than gvfs
        return mount_sshfs(mntopts=opts, **common)
    return mount_gio(scheme=scheme, **common)
def umount(mntpoint: Path):
    """Unmount the given mountpoint.

    FUSE mounts (sshfs) are unmounted with fusermount3 and cleaned up;
    symlinks (gio/gvfs mounts) are unmounted via gio when their target
    still exists, then unlinked. Plain directories are left alone.

    BUGFIX: the body previously referenced the module-level variable
    `path` instead of its own parameter, which only worked by accident
    because the caller happened to assign a global with the same value.
    """
    if mntpoint.is_mount():
        cmd = ["fusermount3", "-u", str(mntpoint)]
        subprocess.run(cmd, check=True)
        clean(mntpoint)
    elif mntpoint.is_symlink():
        if mntpoint.readlink().exists():
            cmd = ["gio", "mount", "--unmount", str(mntpoint.resolve())]
            subprocess.run(cmd, check=True)
            # do not call clean(), gio takes a while to remove the target
        mntpoint.unlink()
    elif mntpoint.is_dir():
        print(f"Note: directory '{mntpoint}' is not a mount point.", file=sys.stderr)
    return
def clean(path: Path):
    """Remove leftovers of a previous mount: a broken symlink, or an empty
    directory that is not currently a mount point. Anything else is kept."""
    if path.is_symlink():
        if not path.readlink().exists():
            print(f"Removing broken symlink '{path}'...")
            path.unlink()
        return
    if not path.is_mount() and not any(path.iterdir()):
        print(f"Removing empty mountpoint '{path}'...")
        path.rmdir()
def cleanAll(mountpath):
    """Run clean() over every directory entry below the mount root."""
    for entry in mountpath.iterdir():
        # iterdir() already yields full paths; joining keeps the original
        # behaviour (Path / absolute-Path returns the absolute path)
        candidate = mountpath / entry
        if candidate.is_dir():
            clean(candidate)
def writeDefaultConfig():
    """Write a commented template config file to the module-level CONFIG path."""
    with open(CONFIG, mode="w", encoding="utf-8") as cfile:
        # the template text below is written verbatim (only the default
        # mountpath is interpolated)
        print(
            f"""\
# globals live in the DEFAULT section
[DEFAULT]
mountpath = {DEFAULT_MOUNTPATH}
#mntopts = opt1=val1, opt2=val2, ...  # optional

#[remote_name]
#scheme = ...  # optional, either sshfs (default) or anything else supported by gvfs
#host = ...  # optional, equal to remote_name by default
#path = ...  # optional, sshfs defaults to remote $HOME
#user = ...  # optional, .ssh/config is honoured
#port = ...  # optional, .ssh/config is honoured
#mntopts = opt1=val1, opt2=val2, ...  # optional
""",
            file=cfile,
        )
if __name__ == "__main__":
    config = configparser.ConfigParser()
    # create a commented template on first run
    if not CONFIG.exists():
        writeDefaultConfig()
    config.read(CONFIG)

    parser = argparse.ArgumentParser(
        description="wrapper for sshfs and gio with a config file"
    )
    parser.add_argument(
        "--list-available",
        action="store_true",
        help="list the hosts defined in the configuration file and exit",
    )
    parser.add_argument(
        "--list-mounted",
        action="store_true",
        help="list the currently mounted hosts and exit",
    )
    parser.add_argument(
        "-u", "--unmount", action="store_true", help="unmount given host or path"
    )
    parser.add_argument(
        "host", nargs="*", help="remote name(s) specified in the config file"
    )
    args = parser.parse_args()

    # root directory under which all remotes get mounted
    mountpath = Path(
        os.path.expanduser(
            config.get("DEFAULT", "mountpath", fallback=DEFAULT_MOUNTPATH)
        )
    )

    if args.list_available:
        # all config sections except DEFAULT are host definitions
        hosts = set(key for key in config.keys() if key != "DEFAULT")
        for host in sorted(hosts):
            print(host)
    elif args.list_mounted:
        for file in sorted(mountpath.iterdir()):
            print(file.name)
    else:
        if args.host:
            for host in args.host:
                if args.unmount:
                    if Path(host).is_dir():
                        # not a host, but a path
                        path = Path(host)
                    else:
                        path = mountpath / host
                    if not path.exists():
                        print(
                            f"Note: path '{path}' does not exist.", file=sys.stderr
                        )
                    umount(path)
                else:
                    if config.has_section(host):
                        if (mountpath / host).is_mount():
                            parser.error(f"Host '{host}' is already mounted.")
                        mount(host, mountpath, config)
                    else:
                        parser.error(
                            f"Section '{host}' does not exist in the config file."
                        )
        else:
            parser.error("No hosts were given.")
        # sweep leftover empty mountpoints / broken symlinks after the work
        cleanAll(mountpath)
================================================
FILE: forcemp3convert.sh
================================================
#! /bin/bash
# forcefully convert any file to mp3 (with fixed bitrate), preserving metadata (if possible)
set -e
for file in "$@"; do
    # NOTE(review): mktemp -u only generates a name without creating the file,
    # which is inherently racy; ffmpeg creates the file right after.
    tmpfile="$(mktemp -u)-forcemp3convert.mp3"
    # 44.1 kHz, 128 kb/s, stereo; -map_metadata 0 copies tags from the input
    ffmpeg -i "$file" -acodec libmp3lame -ar 44100 -ab 128k -ac 2 -f mp3 -map_metadata 0 -y "$tmpfile"
    # replace the original extension with .mp3 (strip from the last dot)
    mv "$tmpfile" "${file%\.*}.mp3"
done
================================================
FILE: hddtemp.sh
================================================
#!/bin/bash
# Print the SMART temperature of one or more ATA drives.
# usage: hddtemp.sh [device ...]   (defaults to /dev/sda)
# NOTE(review): collapsing "$@" into a scalar and iterating unquoted relies
# on word splitting; device paths with spaces are not supported.
devices="$@"
devices=${devices:-/dev/sda}
for device in $devices; do
    # smartctl pipeline: grab the 10th field (raw value) of the Temperature_Celsius attribute
    cmd="smartctl -d ata -a $device | grep \"Temperature_Celsius\" | awk '{print \$10}'"
    # re-run via sudo when not root; eval is needed because $cmd contains a pipeline
    if [[ $UID != 0 ]]; then
        echo "Running \`sudo $cmd\`"
        temp=$(eval "sudo $cmd")
    else
        echo "Running \`$cmd\`"
        temp=$(eval "$cmd")
    fi
    echo "Temperature of $device: $temp°C"
done
================================================
FILE: imap-notifier.py
================================================
#!/usr/bin/env python3
import asyncio
import email.header
import email.parser
import imaplib
import json
import logging
import os
import subprocess
import sys
from pathlib import Path
import jsonschema
import yaml
logger = logging.getLogger(__name__)
# Define the JSON schema for the configuration file
# JSON schema used to validate the YAML configuration file.
# Top level: required "accounts" list plus an optional global "timeout".
config_schema = {
    "type": "object",
    "required": ["accounts"],
    "properties": {
        "accounts": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "required": [
                    "username",
                    "hostname",
                    "password_command",
                ],
                "properties": {
                    "username": {
                        "type": "string",
                        "minLength": 1,
                    },
                    # imaps (SSL, default port 993) or plain imap (port 143)
                    "protocol": {
                        "type": "string",
                        "enum": ["imaps", "imap"],
                    },
                    "hostname": {
                        "type": "string",
                        "format": "hostname",
                    },
                    "port": {
                        "type": "integer",
                        "minimum": 1,
                        "maximum": 65535,
                    },
                    # shell command whose stdout is the account password
                    "password_command": {
                        "type": "string",
                        "minLength": 1,
                    },
                    "include_mailboxes": {
                        "type": "array",
                        "items": {"type": "string"},
                        "minItems": 1,
                        "uniqueItems": True,
                    },
                    "exclude_mailboxes": {
                        "type": "array",
                        "items": {"type": "string"},
                        "minItems": 1,
                        "uniqueItems": True,
                    },
                },
                "additionalProperties": False,
                # include_mailboxes and exclude_mailboxes are mutually exclusive
                "allOf": [
                    {"not": {"required": ["include_mailboxes", "exclude_mailboxes"]}}
                ],
            },
        },
        # polling interval in seconds
        "timeout": {
            "type": "integer",
            "minimum": 30,
            "maximum": 3600,
        },
    },
    "additionalProperties": False,
}
def load_config(config_path: Path):
    """Load configuration from XDG_CONFIG_HOME/imap-notifier.yaml

    Returns the parsed config dict, or None when the file is missing,
    cannot be parsed, or fails schema validation (errors are logged).
    """
    try:
        with open(config_path, "r") as f:
            config = yaml.safe_load(f)
        # Validate the configuration against the schema
        jsonschema.validate(instance=config, schema=config_schema)
        return config
    except FileNotFoundError:
        logger.error(f"Configuration file not found: {config_path}")
        return None
    except yaml.YAMLError as e:
        logger.error(f"Error parsing configuration file: {e}")
        return None
    except jsonschema.ValidationError as e:
        logger.error(f"Invalid configuration: {e}")
        return None
# helper function to decode MIME-encoded headers
# https://docs.python.org/3/library/email.header.html#email.header.decode_header
# helper function to decode MIME-encoded headers
# https://docs.python.org/3/library/email.header.html#email.header.decode_header
def decode_header(header):
    """Decode a possibly MIME-encoded header value into a plain str.

    Returns None for a missing (None) header. Fragments that come back as
    bytes are decoded with their declared charset (ASCII when undeclared).
    """
    if header is None:
        return None

    def _as_text(fragment, charset):
        if isinstance(fragment, str):
            # already decoded
            return fragment
        return str(fragment, encoding=charset if charset is not None else "ascii")

    return "".join(
        _as_text(fragment, charset)
        for fragment, charset in email.header.decode_header(header)
    )
class IMAPNotifier:
    """Polls configured IMAP accounts and sends desktop notifications for new mail."""

    # Paths resolved per the XDG base directory convention.
    xdg_config_home = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
    config_path = Path(xdg_config_home) / "imap-notifier.yaml"
    xdg_state_home = os.environ.get(
        "XDG_STATE_HOME", os.path.expanduser("~/.local/state")
    )
    state_file_path = Path(xdg_state_home) / "imap-notifier" / "state.json"

    def __init__(self):
        # parsed YAML config dict, or None when loading/validation failed
        self.config = load_config(self.config_path)
        # per-account persisted state (unseen message IDs)
        self.state = {}
        # account_id -> live imaplib connection cache
        self.mail_connections = {}
        # set to request a graceful stop of run()
        self.shutdown_event = asyncio.Event()
    def load_state(self):
        """Load last check times from state file"""
        try:
            with open(self.state_file_path, "r") as f:
                self.state = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            # missing or corrupt state file: keep the empty dict from __init__
            # NOTE(review): the return value {} is unused by callers
            return {}
    def save_state(self):
        """Save last check times to state file"""
        try:
            # create the state directory on first save
            self.state_file_path.parent.mkdir(parents=True, exist_ok=True)
            with open(self.state_file_path, "w") as f:
                json.dump(self.state, f)
        except Exception as e:
            # state persistence is best-effort; log and continue
            logger.error(f"Failed to save state: {e}")
def get_account_id(self, account_config):
"""Generate a unique ID for an account"""
username = account_config["username"]
protocol = account_config.get("protocol", "imaps")
hostname = account_config["hostname"]
if protocol == "imaps":
port = account_config.get("port", 993)
else:
port = account_config.get("port", 143)
return f"{protocol}://{username}@{hostname}:{port}"
def get_password(self, account_config):
"""Get password using the configured command"""
if "password_command" in account_config:
try:
result = subprocess.run(
account_config["password_command"],
shell=True,
capture_output=True,
text=True,
check=True,
)
return result.stdout.strip()
except subprocess.CalledProcessError as e:
logger.error(
f"Failed to get password for {account_config['username']}: {e}"
)
return None
else:
logger.error(
f"No password command configured for {account_config['username']}"
)
return None
    def send_notification(self, message):
        """Send desktop notification for new email"""
        try:
            # Extract sender and subject (decoding MIME encoded-words)
            sender = decode_header(message.get("From")) or "[Unknown Sender]"
            subject = decode_header(message.get("Subject")) or "[No Subject]"
            subprocess.run(
                [
                    "notify-send",
                    "--app-name=EmailNotification",
                    "--expire-time=3000",  # duration in ms
                    "--urgency=normal",  # critical would be shown forever
                    "--icon=mail-message-new-symbolic",
                    "--category=email.arrived",
                    "Received new email",
                    f"{sender} — {subject}",
                ],
                check=True,
            )
            logger.info(f"Notification sent for email from: {sender}")
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to send notification: {e}")
        except Exception as e:
            logger.error(f"Unexpected error sending notification: {e}")
def is_connection_alive(self, connection, account_id):
"""Check if IMAP connection is still alive"""
try:
# Send a NOOP command to test the connection
connection.noop()
return True
except Exception:
logger.warning(f"Connection for account {account_id} is not alive")
return False
async def connect_to_account(self, account_config):
"""Establish IMAP connection for an account"""
protocol = account_config.get("protocol", "imaps")
hostname = account_config["hostname"]
try:
# Create connection based on whether it's secure (imaps) or not
if protocol == "imaps":
port = account_config.get("port", 993)
client = imaplib.IMAP4_SSL(hostname, port)
else:
port = account_config.get("port", 143)
client = imaplib.IMAP4(hostname, port)
# Get password
username = account_config["username"]
password = self.get_password(account_config)
if not password:
logger.error(f"No password returned for account {username:!r}")
return None
client.login(username, password)
logger.info(
f"Connected to {protocol}://{hostname}:{port} as user {username}"
)
return client
except Exception as e:
logger.error(f"Failed to connect to {protocol}://{hostname}: {e}")
return None
    async def get_new_emails(self, connection, account_id, mailboxes_to_process):
        """Get new emails since last check"""
        logger.debug(f"Checking {account_id} for new emails")
        # Get previous unseen emails from the state
        account_state = self.state.setdefault(account_id, {})
        previous_unseen_message_ids = set(account_state.get("unseen_message_ids", []))
        unseen_message_ids = set()
        new_emails = []
        # Process each mailbox
        for mailbox in mailboxes_to_process:
            try:
                # Remove old state data
                # TODO: remove this after some time
                if mailbox in account_state:
                    del account_state[mailbox]
                # Select mailbox
                connection.select(mailbox)
                # Search for unseen emails
                status, messages = connection.search(None, "UNSEEN")
                if status != "OK":
                    logger.error(
                        f"Failed to search emails in mailbox {mailbox} for account {account_id}"
                    )
                    continue
                email_ids = messages[0].split()
                # Process new emails
                for email_id in email_ids:
                    try:
                        # Fetch the email headers only
                        status, msg_data = connection.fetch(email_id, "(RFC822.HEADER)")
                        if status == "OK":
                            msg = email.parser.Parser().parsestr(
                                msg_data[0][1].decode("utf-8", errors="ignore")
                            )
                            # Always get a Message-ID, which uniquely identifies the message.
                            # The `email_id` obtained from IMAP is just numeric identifier in the *mailbox*,
                            # not in the whole account.
                            # NOTE(review): msg.get may return None when the
                            # header is missing; sorted() below would then fail
                            # mixing None with str — confirm servers always set it.
                            message_id = msg.get("Message-ID")
                            unseen_message_ids.add(message_id)
                            if message_id not in previous_unseen_message_ids:
                                new_emails.append(msg)
                    except Exception as e:
                        logger.error(f"Failed to fetch email {email_id}: {e}")
                        continue
            except Exception as e:
                logger.error(
                    f"Error processing mailbox {mailbox} for account {account_id}: {e}"
                )
        # Update IDs of unseen emails in the state
        account_state["unseen_message_ids"] = sorted(unseen_message_ids)
        return new_emails
    async def process_mailboxes(self, account_config, account_id, connection):
        """Process mailboxes for an account"""
        # Determine which mailboxes to process
        include_mailboxes = account_config.get("include_mailboxes", [])
        exclude_mailboxes = account_config.get("exclude_mailboxes", [])
        # defensive re-check; the config schema already forbids this combination
        if include_mailboxes and exclude_mailboxes:
            logger.error(
                f"Both include_mailboxes and exclude_mailboxes are defined for account "
                f"{account_id}. Please specify only one of them."
            )
            return
        # If no mailboxes specified but exclude_mailboxes is defined,
        # get all mailboxes from server and filter out excluded ones
        if not include_mailboxes and exclude_mailboxes:
            try:
                # Get all mailboxes from server
                status, mailbox_list = connection.list()
                all_mailboxes = []
                if status == "OK" and mailbox_list:
                    for item in mailbox_list:
                        # Extract mailbox name from LIST response
                        # NOTE(review): assumes '/' hierarchy delimiter in the
                        # LIST reply — servers using '.' would not be parsed.
                        mailbox_name = item.decode().split(' "/" ')[-1].strip('"')
                        all_mailboxes.append(mailbox_name)
                # Filter out excluded mailboxes
                mailboxes_to_process = [
                    mb for mb in all_mailboxes if mb not in exclude_mailboxes
                ]
            except Exception as e:
                logger.error(f"Error retrieving mailboxes from server: {e}")
                return
        elif include_mailboxes:
            # Use configured mailboxes
            mailboxes_to_process = include_mailboxes
        else:
            # Fallback to INBOX
            mailboxes_to_process = ["INBOX"]
        # Get new emails
        emails = await self.get_new_emails(connection, account_id, mailboxes_to_process)
        # Send notifications for new emails
        for message in emails:
            self.send_notification(message)
    async def process_account(self, account_config):
        """Process a single account"""
        # Generate a unique ID for the account
        account_id = self.get_account_id(account_config)
        # Check if there's an existing connection for this account
        connection = self.mail_connections.get(account_id)
        try:
            # If no connection exists or it's closed, create a new one
            if not connection or not self.is_connection_alive(connection, account_id):
                connection = await self.connect_to_account(account_config)
                if not connection:
                    return
                self.mail_connections[account_id] = connection
            # Process mailboxes for this account
            await self.process_mailboxes(account_config, account_id, connection)
            logger.debug(f"Finished processing account {account_id}")
        except Exception as e:
            logger.error(f"Error processing account {account_id}: {e}")
            # Remove failed connection from cache so the next cycle reconnects
            if account_id in self.mail_connections:
                del self.mail_connections[account_id]
    async def run(self):
        """Run the notifier.

        Returns False immediately when no valid config was loaded; otherwise
        polls all accounts every `timeout` seconds until shutdown_event is set.
        """
        if not self.config:
            return False
        timeout = int(self.config.get("timeout", 60))
        logger.info(f"Starting mail notifier with timeout {timeout} seconds")
        while not self.shutdown_event.is_set():
            # Process all accounts concurrently
            async with asyncio.TaskGroup() as tg:
                for account in self.config["accounts"]:
                    tg.create_task(self.process_account(account))
            # Save state after each cycle
            self.save_state()
            # Wait before next check
            await asyncio.sleep(timeout)
async def main_async():
    """Async main function: run the notifier and persist state on exit.

    Exits the process with status 1 when run() reports a config failure.
    """
    # Create notifier
    notifier = IMAPNotifier()
    # Load existing state
    notifier.load_state()
    result = True
    try:
        # Run the notifier (returns False only on config failure)
        result = await notifier.run()
    except KeyboardInterrupt:
        logger.info("Interrupted by user")
    finally:
        # Save final state
        notifier.save_state()
        logger.info("Notifier stopped")
    if result is False:
        sys.exit(1)
def main():
    """Main function"""
    # Create event loop and run async main
    asyncio.run(main_async())
if __name__ == "__main__":
    # Configure logging before anything else logs
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )
    main()
================================================
FILE: img2pdf.sh
================================================
#!/bin/bash
# Convert numbered TIFF scans ("anything_number.tif") to per-page PDFs and
# merge them into a single output PDF given as the first argument.
set -e

outfile=$1
ext=tif

if [[ -z "$outfile" ]]; then
    echo "Usage: $0 <output.pdf>" >&2
    exit 1
fi

echo "Converting images to pdf..."
declare -a pages
# take input pattern "anything_number.ext", sort numerically by "number";
# BUGFIX: read line-by-line from a process substitution instead of
# word-splitting `ls` output, so filenames with spaces survive
while IFS= read -r file; do
    echo "    $file"
    pdf=$(basename "$file" .$ext).pdf
    # convert "$file" "$pdf"
    # -z: zip (lossless) compression, -F: fit page, 300 dpi
    tiff2pdf -z -F -x 300 -y 300 -o "$pdf" "$file"
    pages+=("$pdf")
done < <(printf '%s\n' ./*.$ext | sort -t_ -k2,2n)

echo "Merging into one pdf..."
stapler sel "${pages[@]}" "$outfile"
================================================
FILE: initscreen.sh
================================================
#! /bin/bash
# Configure outputs for known monitor layouts, on both X11 (xrandr) and
# Wayland/sway (swaymsg). Picks a layout based on which outputs are connected.
# exit on error
set -e

#hdmi=$(cat /sys/class/drm/card0-HDMI-A-1/status)
#vga=$(cat /sys/class/drm/card0-VGA-1/status)

# collect the names of connected outputs (one per word/line)
if [[ "$WAYLAND_DISPLAY" == "" ]]; then
    connected=$(xrandr | grep " connected" | sed -e "s/\([A-Z0-9]\+\) connected.*/\1/")
else
    connected=$(swaymsg -pt get_outputs | grep -E "^Output" | awk '{print $2}')
fi

#echo "initscreen.sh: hdmi $hdmi; vga $vga"

# laptop with LVDS panel (older machine, X11 only)
if [[ $connected =~ "LVDS-0" ]]; then
    if [[ $connected =~ "HDMI-0" ]]; then
        # hdmi only
        # NOTE: i3 fails if no active output is detected, so we have to first enable second output and then disable the first
        # xrandr --nograb --output HDMI-0 --auto --primary
        # xrandr --nograb --output LVDS-0 --off

        # both
        # HDMI-0 is primary, LVDS-0 is panned to be vertically aligned to the bottom
        # xrandr --nograb --output HDMI-0 --auto --primary --output LVDS-0 --auto --left-of HDMI-0 --panning 1366x768+0+312
        xrandr --output HDMI-0 --auto --primary --output LVDS-0 --auto --left-of HDMI-0 --panning 1366x768+0+312
        # xrandr --output HDMI-0 --auto --primary --output LVDS-0 --auto --right-of HDMI-0
    elif [[ $connected =~ "VGA-0" ]]; then
        # xrandr --nograb --output VGA-0 --auto --output LVDS-0 --mode 1024x768 --primary
        # TODO: look at --scale argument
        xrandr --output VGA-0 --auto --primary --output LVDS-0 --auto --below VGA-0
    else
        # xrandr --nograb --output LVDS-0 --auto --primary --output HDMI-0 --off
        # xrandr --output LVDS-0 --auto --primary --output HDMI-0 --off
        xrandr --output LVDS-0 --auto --primary --output HDMI-0 --off --output VGA-0 --off
    fi
# laptop with eDP panel; honour the lid switch when an external display is attached
elif [[ $connected =~ "eDP-1" ]]; then
    if [[ -f /proc/acpi/button/lid/LID/state ]]; then
        lid=$(cat /proc/acpi/button/lid/LID/state | awk '{print $2}')
    else
        lid="open"
    fi
    if [[ "$WAYLAND_DISPLAY" == "" ]]; then
        if [[ $connected =~ "HDMI-1" ]] && [[ "$lid" == "closed" ]]; then
            xrandr --output HDMI-1 --auto --primary --output eDP-1 --off
            echo "Xft.dpi: 96" | xrdb -merge
        elif [[ $connected =~ "HDMI-1" ]]; then
            xrandr --output HDMI-1 --auto --primary --output eDP-1 --auto --left-of HDMI-1
        else
            xrandr --output eDP-1 --auto --primary --output HDMI-1 --off
            echo "Xft.dpi: 168" | xrdb -merge  # scale=1.75
        fi
    else
        if [[ $connected =~ "HDMI-A-1" ]] && [[ "$lid" == "closed" ]]; then
            swaymsg output HDMI-A-1 enable
            swaymsg output eDP-1 disable
        elif [[ $connected =~ "HDMI-A-1" ]]; then
            swaymsg output HDMI-A-1 enable
            swaymsg output eDP-1 enable
        else
            swaymsg output eDP-1 enable
            swaymsg output HDMI-A-1 disable
        fi
    fi
else
    # unknown machine: just enable the first connected output
    first=$(echo $connected | cut -f1 -d' ')
    xrandr --output ${first} --auto --primary
fi
================================================
FILE: maildir-strip-attachments.py
================================================
#!/usr/bin/env python3
# Documentation:
# - https://docs.python.org/3/library/mailbox.html#mailbox.Maildir
# - https://docs.python.org/3/library/mailbox.html#mailbox.MaildirMessage
import os
import argparse
import mailbox
# parts smaller than this are always kept
DROP_MIN_SIZE = 256  # KiB
# content-type prefixes considered droppable attachments (matched with startswith)
DROP_CONTENT_TYPES = [
    "image/",
    "video/",
    "application/pdf",
    "application/x-extension-pdf",
    "application/zip",
    "application/gzip",
    "application/x-gzip",
    "application/x-xz",
    "application/x-7z-compressed",
    "application/x-zip-compressed",
    "application/x-rar-compressed",
    "application/x-msdownload",
    "application/msword",
    "application/vnd.ms-excel",
    "application/vnd.ms-powerpoint",
    "application/vnd.ms-xpsdocument",
    "application/octet-stream",
]
def process_maildir(maildir):
    """Strip large attachments (per DROP_* settings) from all messages in a maildir,
    rewriting modified messages in place and printing a summary."""
    dropped_items = 0
    dropped_size = 0
    mb = mailbox.Maildir(maildir, create=False)
    for key, message in mb.iteritems():
        for part in message.walk():
            # only leaf parts carry payloads
            if part.is_multipart():
                continue
            # size in KiB, including the part's headers and transfer encoding
            size = len(part.as_bytes()) / 1024
            if size > DROP_MIN_SIZE:
                print("{}\tsize: {:g} KiB".format(part.get_content_type(), size))
                for ct in DROP_CONTENT_TYPES:
                    if part.get_content_type().startswith(ct):
                        # drop the payload but keep the part headers
                        part.set_payload("")
                        dropped_items += 1
                        dropped_size += size
        # update the message on disk
        # NOTE(review): called for every message even when unmodified — confirm
        # this placement (per-message) matches the intended write frequency.
        mb.update({key: message})
    print("Dropped {} attachements ({:g} MiB).".format(dropped_items, dropped_size / 1024))
def argtype_dir_path(string):
    """argparse type helper: accept only paths to existing directories."""
    if not os.path.isdir(string):
        raise NotADirectoryError(string)
    return string
def argtype_maildir(string):
    """argparse type helper: validate that the path is a maildir, i.e. an
    existing directory containing the cur/, new/ and tmp/ subdirectories."""
    string = argtype_dir_path(string)
    for required in ("cur", "new", "tmp"):
        subdir = os.path.join(string, required)
        if not os.path.isdir(subdir):
            raise NotADirectoryError(subdir)
    return string
if __name__ == "__main__":
    # CLI entry point: single positional maildir path, validated by the type helper
    ap = argparse.ArgumentParser(description="Strip attachments from messages in a maildir.")
    ap.add_argument("maildir", metavar="PATH", type=argtype_maildir,
                    help="path to the maildir")
    args = ap.parse_args()
    process_maildir(args.maildir)
================================================
FILE: makeissue.sh
================================================
# Generate an /etc/issue login banner with ANSI-coloured Arch Linux ASCII art.
# The \e escapes are terminal control sequences; \s \r \t \d \U \l \n are
# agetty issue-file escapes (system name, release, tty, date, users, ...).
# The first line clears the screen; every echo appends one banner line.
echo -e '\e[H\e[2J' > issue
echo -e '               \e[1;30m|                     \e[34m\\s \\r' >> issue
echo -e '     \e[36;1m/\\\\     \e[37m||         \e[36m|     |    \e[30m|' >> issue
echo -e '    \e[36m/  \\\\    \e[37m||       \e[36m| _   |    \e[30m|   \e[32m\\t' >> issue
echo -e '   \e[1;36m/ \e[0;36m.. \e[1m\\\\   \e[37m//==\\\\\\\\ ||/= /==\\\\ ||/=\\\\   \e[36m| | |/ \\\\ | | \\\\ /   \e[30m|   \e[32m\\d' >> issue
echo -e '  \e[0;36m/ .  . \\\\  \e[37m|| || || | || || \e[36m| | | | | |  X   \e[1;30m|' >> issue
echo -e ' \e[0;36m/ .    . \\\\ \e[37m\\\\\\\\==/| || \\\\==/ || || \e[36m| | | | \\\\_/| / \\\\  \e[1;30m|   \e[31m\\U' >> issue
echo -e '\e[0;36m/ ..    .. \\\\ \e[0;37mA simple, lightweight linux distribution.  \e[1;30m|' >> issue
echo -e '\e[0;36m/_\x27      `_\\\\ \e[1;30m|   \e[35m\\l \e[0mon \e[1;33m\\n' >> issue
echo -e '\e[0m' >> issue
echo -e '' >> issue
================================================
FILE: mp3convert.py
================================================
#! /usr/bin/env python3
import sys
import os
import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
import re
import shutil
import subprocess
import shlex
from pythonscripts.cpu import cores_count
from pythonscripts.tempfiles import TempFiles
from pythonscripts.ffparser import FFprobeParser
# recognized audio file extensions (also the allowed --output-extension choices)
audio_types = ("mp3", "aac", "ac3", "mp2", "wma", "wav", "mka", "m4a", "ogg", "oga", "flac")
# matches an *absolute* path to an audio file; named groups: dirname, filename, extension
audio_file_regex = re.compile("^(?P<dirname>/(.*/)*)(?P<filename>.*(?P<extension>\.(" + "|".join(audio_types) + ")))$")
# template command; {input}/{output} are shlex-quoted before substitution
ffmpeg_command = "ffmpeg -i {input} -acodec libmp3lame -ar 44100 -ab {bitrate:d}k -ac 2 -f mp3 -map_metadata 0 -y {output}"
class GettingBitrateError(Exception):
    """Raised when ffprobe reports no audio bitrate for a file."""
    def __init__(self, fname):
        # callers read e.message rather than str(e)
        self.message = f"Couldn't get bitrate from file {fname}"
class ConversionError(Exception):
    """Raised when ffmpeg exits with a non-zero status during conversion."""
    def __init__(self, fname, status, output):
        # callers read e.message; output is appended verbatim (must be str)
        self.message = (
            f"Error while converting file {fname}\n"
            f"ffmpeg exited with status {status}\n" + output
        )
def get_bitrate(filename):
    """Return the audio bitrate of *filename* in kb/s (integer division).

    Raises GettingBitrateError when ffprobe yields no bitrate.
    """
    probe = FFprobeParser(filename)
    bitrate = probe.get("audio", "bit_rate")
    del probe
    if bitrate is None:
        raise GettingBitrateError(filename)
    return bitrate // 1000
def convert(filename, output_extension, bitrate, delete_after=False):
    """Convert an audio file to *output_extension* at *bitrate* kb/s via ffmpeg.

    Writes into a temporary file first and moves it next to the original so
    a failed run never clobbers the source. Uses the module-level `tmp`
    (TempFiles) registry for temp-file bookkeeping.

    Raises ConversionError when ffmpeg exits with a non-zero status.
    """
    tmpfile = tmp.getTempFileName()
    command = ffmpeg_command.format(input=shlex.quote(filename), bitrate=bitrate, output=shlex.quote(tmpfile))
    try:
        # BUGFIX: text=True makes CalledProcessError carry str output instead
        # of bytes; ConversionError concatenates it into its message, which
        # previously raised TypeError (str + bytes).
        subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
        if delete_after:
            os.remove(filename)
        shutil.move(tmpfile, os.path.splitext(filename)[0] + output_extension)
        tmp.remove(tmpfile)
    except subprocess.CalledProcessError as e:
        tmp.remove(tmpfile)
        # ffmpeg writes diagnostics to stderr; fall back to stdout
        raise ConversionError(filename, e.returncode, e.stderr or e.output or "")
class Main():
    """Drives the scan/convert pipeline and gathers statistics."""

    def __init__(self, args):
        # statistics counters, reported by print_stats()
        self.countAudioFiles = 0
        self.countHigherBitrate = 0
        self.countDifferentFormat = 0
        self.countErrors = 0
        self.countNonAudioFiles = 0
        # options taken from the parsed argparse namespace
        self.dry_run = args.dry_run
        self.bitrate = args.bitrate
        self.verbose = args.verbose
        self.recursive = args.recursive
        self.deleteAfter = args.delete_after
        self.outputExtension = "." + args.output_extension
        self.paths = args.path
    def print_stats(self):
        """Print the summary of all counters collected during the run."""
        print()
        print("-----------collected statistics-----------")
        print("All audio files (without errors): % 6d" % self.countAudioFiles)
        print("Converted files: % 6d" % (self.countDifferentFormat + self.countHigherBitrate))
        print("  - different format: % 6d" % self.countDifferentFormat)
        print("  - %3s but higher bitrate: % 6d" % (self.outputExtension[1:], self.countHigherBitrate))
        print("Errors: % 6d" % self.countErrors)
        print("Non-audio files: % 6d" % self.countNonAudioFiles)
        print("------------------------------------------")
    def check(self, path):
        """Decide whether *path* needs conversion; updates the counters.

        Returns True when the file has a different extension than the target,
        or the same extension but a higher bitrate than requested.
        May raise GettingBitrateError (propagated from get_bitrate).
        """
        match = re.match(audio_file_regex, path)
        if not match:
            self.countNonAudioFiles += 1
            return False
        filename = match.group("filename")
        ext = match.group("extension")
        self.countAudioFiles += 1
        # extension differs -> convert unconditionally
        if ext != self.outputExtension:
            self.countDifferentFormat += 1
            return True
        # same extension -> convert only when the bitrate is above the target
        bitrate = get_bitrate(path)
        if self.verbose > 0:
            sys.stdout.write("% 3s kb/s: %s\n" % (bitrate, filename))
        if bitrate > self.bitrate:
            self.countHigherBitrate += 1
            return True
        return False
    async def run(self):
        """Convert all queued files concurrently, then print statistics."""
        # We could use the default single-threaded executor with basically the same performance
        # (because of Python's GIL), but the ThreadPoolExecutor allows to limit the maximum number
        # of workers and thus the maximum number of concurrent subprocesses.
        with ThreadPoolExecutor(max_workers=cores_count()) as executor:
            loop = asyncio.get_event_loop()
            tasks = [
                loop.run_in_executor(executor, self.worker, path)
                for path in self.queue_generator()
            ]
            # wait for all workers; results are None, errors are handled in worker()
            for result in await asyncio.gather(*tasks):
                pass
        self.print_stats()
    def worker(self, path):
        """Check and convert a single file; runs inside the thread pool.

        Errors are reported on stderr and counted, never propagated.
        NOTE(review): the counter increments are not synchronized across
        worker threads — confirm this is acceptable for statistics.
        """
        path = os.path.abspath(path)
        try:
            # check bitrate/filetype etc., skip if conversion not necessary
            if not self.check(path) or self.dry_run:
                return
            print("Converting: {}".format(path))
            convert(path, self.outputExtension, self.bitrate, self.deleteAfter)
        except ConversionError as e:
            msg = "ERROR: failed to convert file '{}'".format(path)
            if self.verbose > 0:
                msg += "\n" + e.message
            print(msg, file=sys.stderr)
            self.countErrors += 1
        except GettingBitrateError as e:
            msg = "ERROR: failed to get bitrate from file '{}'".format(path)
            if self.verbose > 0:
                msg += "\n" + e.message
            print(msg, file=sys.stderr)
            self.countErrors += 1
        else:
            # reached only when the try block completed without an exception
            print("Done: {}".format(path))
def queue_generator(self):
""" For each directory in self.files returns generator returning full paths to mp3 files in that folder.
If self.files contains file paths instead of directory, it's returned as [file].
"""
def walk(root):
dirs = []
files = []
for entry in os.scandir(root):
if entry.is_dir():
dirs.append(entry.name)
elif entry.is_file():
files.append(entry.name)
# first yield found files, then recurse into subdirs
for f in files:
yield os.path.join(root, f)
if self.recursive:
for d in dirs: # recurse into subdir
for f in walk(os.path.join(root, d)):
yield f
for path in self.paths:
if os.path.isdir(path):
for f in walk(path):
yield f
else:
yield path
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="convert all audio files in given folder (recursively) to specified bitrate, skip if bitrate is less or equal")
    parser.add_argument("path", action="store", nargs="+", help="path to file(s) to convert - filename or directory")
    parser.add_argument("-r", "--recursive", action="store_true", help="browse folders recursively")
    parser.add_argument("--dry-run", action="store_true", help="don't convert, only print stats")
    parser.add_argument("-b", "--bitrate", action="store", type=int, metavar="BITRATE", default="128", help="set bitrate - in kb/s, default=128")
    parser.add_argument("-v", "--verbose", action="count", default=0, help="set verbosity level")
    parser.add_argument("--delete-after", action="store_true", help="delete old files after conversion")
    parser.add_argument("--output-extension", choices=audio_types, type=str, default="mp3", help="set output extension")
    args = parser.parse_args()

    # module-level temp-file registry used by convert()
    tmp = TempFiles()
    main = Main(args)
    asyncio.run(main.run())
================================================
FILE: nat-launch-subnet.sh
================================================
#!/bin/bash
# Print usage and an annotated sample configuration for launch_subnet.
# The CONFIG here-doc delimiter is single-quoted on purpose: the variable
# references in the sample must be printed literally, not expanded.
function print_launch_subnet_usage()
{
    echo "USAGE"
    echo " $0 <up|down>"
    cat <<'CONFIG'
REQUIRED VARIABLES
# The network interface card (NIC) that is connected to the internet or other
# wide area network.
wan_nic="wlan0"
# The network interface card connected to the subnet.
subnet_nic="eth0"
# The subnet IP mask.
mask=/24
# The subnet IP range.
subnet_ip=10.0.0.0$mask
# The IP of the subnet NIC on the subnet.
server_ip=10.0.0.100$mask
# The IP tables binary to use.
iptables=/usr/bin/idemptables
# The dnsmasq arguments - PID and lease files to use.
dnsmasq_pid=/tmp/dhcpd.pid
dnsmasq_lease=/tmp/dhcpd.lease
# The port of DNS service, see dnsmasq(8) for details. Specify "0" to disable DNS server.
dnsmasq_port=53
# The DHCP range, see dnsmasq(8) for details.
dnsmasq_dhcp_range="192.168.1.100,192.168.1.200,12h"
OPTIONAL VARIABLES
# Function or external scripts to run before before and after bringing the
# subnet NIC up or down: pre_up, post_up, pre_down, post_down
# pre_up as a function:
# function pre_up()
# {
# }
# pre_up as a script:
# pre_up=/path/to/script
# ip_forward=0
# The value of /proc/sys/net/ipv4/ip_forward to restore when shutting down
# the subnet.
CONFIG
}
# Bring a NAT'ed subnet up or down.
# $1: "up" or "down"
# Expects the configuration variables documented by
# print_launch_subnet_usage to be set by the caller (see nat-launch.sh).
# Runs with `set -e`, so any failing command aborts the whole sequence.
function launch_subnet()
{
    set -e

    if [[ -z $1 ]]
    then
        print_launch_subnet_usage
        exit 1
    else
        action="$1"
    fi

    # validate that every required configuration variable is set
    if [[ -z $wan_nic ]]
    then
        echo "wan_nic is undefined"
        exit 1
    fi
    if [[ -z $subnet_nic ]]
    then
        echo "subnet_nic is undefined"
        exit 1
    fi
    if [[ -z $mask ]]
    then
        echo "mask is undefined"
        exit 1
    fi
    if [[ -z $subnet_ip ]]
    then
        echo "subnet_ip is undefined"
        exit 1
    fi
    if [[ -z $server_ip ]]
    then
        echo "server_ip is undefined"
        exit 1
    fi
    if [[ -z $iptables ]]
    then
        echo "iptables is undefined"
        exit 1
    fi
    if [[ -z $dnsmasq_pid ]]
    then
        echo "dnsmasq_pid is undefined"
        exit 1
    fi
    if [[ -z $dnsmasq_lease ]]
    then
        echo "dnsmasq_lease is undefined"
        exit 1
    fi
    if [[ -z $dnsmasq_port ]]
    then
        echo "dnsmasq_port is undefined"
        exit 1
    fi
    if [[ -z $dnsmasq_dhcp_range ]]
    then
        echo "dnsmasq_dhcp_range is undefined"
        exit 1
    fi

    case "$action" in
        up)
            # Enable IP forwarding.
            echo 1 > /proc/sys/net/ipv4/ip_forward

            ## iptables rules are changed to fit my firewall config
            ## see http://xyne.archlinux.ca/notes/network/dhcp_with_dns.html for original rules
            # Open up DNS (53) and DHCP (67) ports on subnet_nic.
            "$iptables" -A nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p tcp --dport 53 -j ACCEPT
            "$iptables" -A nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p udp --dport 53 -j ACCEPT
            "$iptables" -A nat-subnet -i "$subnet_nic" -p udp --dport 67 -j ACCEPT
            # Reply to ICMP (ping) packets so clients can check their connections.
            "$iptables" -A nat-subnet -i "$subnet_nic" -p icmp --icmp-type echo-request -j ACCEPT
            #"$iptables" -A OUTPUT -i "$subnet_nic" -p icmp --icmp-type echo-reply -j ACCEPT
            # Allow postrouting to wan_nic (for e.g. internet access on the subnet).
            "$iptables" -t nat -A POSTROUTING -s "$subnet_ip" -o "$wan_nic" -j MASQUERADE
            # Enable forwarding from subnet_nic to wan_nic (and back via related and established connections).
            "$iptables" -A FORWARD -i "$subnet_nic" -s "$subnet_ip" -o "$wan_nic" -j ACCEPT
            "$iptables" -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT

            # Bring down subnet_nic, configure it and bring it up again.
            # pre_up (function or script) runs while the NIC is down.
            if [[ ! -z $pre_up ]]
            then
                ip link set dev "$subnet_nic" down
                "$pre_up"
            fi
            ip link set dev "$subnet_nic" up
            if [[ ! -z $post_up ]]
            then
                "$post_up"
            fi
            # Set the static IP for subnet_nic.
            ip addr add "$server_ip" dev "$subnet_nic"

            # Ensure the lease file exists.
            mkdir -p -- "${dnsmasq_lease%/*}"
            [[ -f $dnsmasq_lease ]] || touch "$dnsmasq_lease"

            # Launch the DHCP server (option 6 advertises the server itself
            # as DNS; the /mask suffix is stripped from server_ip).
            dnsmasq \
                --pid-file="$dnsmasq_pid" \
                --dhcp-leasefile="$dnsmasq_lease" \
                --port="$dnsmasq_port" \
                --interface="$subnet_nic" \
                --except-interface=lo \
                --bind-interfaces \
                --dhcp-range="$dnsmasq_dhcp_range" \
                --dhcp-authoritative \
                --dhcp-option=6,"${server_ip%/*}"
            ;;
        down)
            # Kill the DHCP server.
            if [[ -f $dnsmasq_pid ]]
            then
                kill $(cat "$dnsmasq_pid") && rm "$dnsmasq_pid" && echo "killed server"
            fi

            if [[ ! -z $pre_down ]]
            then
                "$pre_down"
            fi
            ip addr delete "$server_ip" dev "$subnet_nic"
            ip link set dev "$subnet_nic" down
            if [[ ! -z $post_down ]]
            then
                "$post_down"
            fi

            # Undo all of the changes above in reverse order.
            "$iptables" -D FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
            "$iptables" -D FORWARD -i "$subnet_nic" -s "$subnet_ip" -o "$wan_nic" -j ACCEPT
            "$iptables" -t nat -D POSTROUTING -s "$subnet_ip" -o "$wan_nic" -j MASQUERADE
            #"$iptables" -D OUTPUT -i "$subnet_nic" -p icmp --icmp-type echo-reply -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -p icmp --icmp-type echo-request -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -p udp --dport 67 -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p udp --dport 53 -j ACCEPT
            "$iptables" -D nat-subnet -i "$subnet_nic" -s "$subnet_ip" -p tcp --dport 53 -j ACCEPT

            # Restore IP forwarding to $ip_forward when set, else disable it.
            if [[ ! -z $ip_forward ]]
            then
                if [[ $ip_forward != $(cat /proc/sys/net/ipv4/ip_forward) ]]
                then
                    echo $ip_forward > /proc/sys/net/ipv4/ip_forward
                fi
            else
                echo 0 > /proc/sys/net/ipv4/ip_forward
            fi
            ;;
        *)
            print_launch_subnet_usage
            exit 1
            ;;
    esac
}
================================================
FILE: nat-launch.sh
================================================
#!/bin/bash
# Original author: Xyne
# http://xyne.archlinux.ca/notes/network/dhcp_with_dns.html

function print_usage() {
    echo "usage: $0 <WAN interface> <subnet interface> <up|down>"
}

# manipulating interfaces and iptables requires root
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root." >&2
    exit 1
fi

if [[ -z $3 ]]; then
    print_usage
    exit 1
else
    wan_nic="$1"
    subnet_nic="$2"
    action="$3"
fi

# configuration consumed by launch_subnet (see nat-launch-subnet.sh)
mask=/24
subnet_ip=192.168.1.0$mask
server_ip=192.168.1.23$mask
iptables=/usr/bin/idemptables
# per-interface dnsmasq runtime files
dnsmasq_pid=/run/dnsmasq_$subnet_nic.pid
dnsmasq_lease=/run/dnsmasq_$subnet_nic.lease
# port 0 disables dnsmasq's DNS server (DHCP only)
dnsmasq_port=0
dnsmasq_dhcp_range="192.168.1.100,192.168.1.150,6h"

# nat-launch-subnet.sh only defines functions; run the requested action
source nat-launch-subnet.sh
launch_subnet "$action"
================================================
FILE: notify-brightness.sh
================================================
#! /bin/bash

# LCD brightness notification (level changed by ACPI, no action required)

# notification duration in ms
duration=1500

# read the current and maximum brightness from sysfs, set title
level=$(cat "/sys/class/backlight/intel_backlight/brightness")
max=$(cat "/sys/class/backlight/intel_backlight/max_brightness")
percent=$(( level * 100 / max ))
title="LCD brightness"

# create fancy bar: f filled cells + e empty cells out of 10
f=$((percent/10))
e=$((10-f))
fchars='◼◼◼◼◼◼◼◼◼◼'
echars='◻◻◻◻◻◻◻◻◻◻'
bar="${fchars:0:f}${echars:0:e} $percent%"

# FIX: app-name previously said "VolumeNotification" (copy-paste from
# notify-volume.sh); use a name that matches this script's purpose
notify-send --app-name=BrightnessNotification --expire-time="$duration" --urgency=low --transient "$title" "$bar"
================================================
FILE: notify-volume.sh
================================================
#!/bin/bash
# volume control (up/down/mute/unmute/toggle) + notification

# notification display time in ms (used by notify())
duration=1500
# Show the current volume level (and mute state) as a desktop notification.
notify () {
    # current volume: the first "NN%" figure in pactl's output
    vol=$(pactl get-sink-volume @DEFAULT_SINK@ | grep -Po '\d+(?=%)' | head -n 1)

    # title reflects the mute state
    title="Volume"
    [[ $(pactl get-sink-mute @DEFAULT_SINK@) == "Mute: yes" ]] && title="Volume muted"

    # fancy 10-cell bar: filled vs. empty cells
    filled=$((vol / 10))
    empty=$((10 - filled))
    fchars='◼◼◼◼◼◼◼◼◼◼'
    echars='◻◻◻◻◻◻◻◻◻◻'
    bar="${fchars:0:filled}${echars:0:empty} $vol%"

    notify-send --app-name=VolumeNotification --category=device --expire-time="$duration" --urgency=low --transient "$title" "$bar"
}
# redirect stdout of this script to /dev/null
exec > /dev/null

# adjust the default sink first, then display the resulting state
case "$1" in
    up)
        pactl set-sink-volume @DEFAULT_SINK@ +5%
        pactl set-sink-mute @DEFAULT_SINK@ 0
        ;;
    down)
        pactl set-sink-volume @DEFAULT_SINK@ -5%
        pactl set-sink-mute @DEFAULT_SINK@ 0
        ;;
    mute)   pactl set-sink-mute @DEFAULT_SINK@ 1 ;;
    unmute) pactl set-sink-mute @DEFAULT_SINK@ 0 ;;
    toggle) pactl set-sink-mute @DEFAULT_SINK@ toggle ;;
esac

notify
================================================
FILE: pacman-disowned.sh
================================================
#!/bin/sh

# List files under /etc, /opt and /usr that no pacman package owns.

# scratch area, removed on any exit
workdir=${TMPDIR-/tmp}/pacman-disowned-$UID-$$
owned=$workdir/db
present=$workdir/fs

mkdir "$workdir"
trap 'rm -rf "$workdir"' EXIT

# every file owned by some package, sorted for comm(1)
pacman -Qlq | sort -u > "$owned"

# every file actually on disk in the managed trees; directories get a
# trailing slash to match pacman's -Ql output
find /etc /opt /usr ! -name lost+found \( -type d -printf '%p/\n' -o -print \) | sort > "$present"

# lines unique to the filesystem listing = disowned files
comm -23 "$present" "$owned"
================================================
FILE: pdf-extract.sh
================================================
#!/bin/bash
# Extract the pages of a pdf/djvu document as TIFF images.
# exit on error
set -e
# Convert a single input file to an image with ImageMagick, discarding output.
# $1: input file, $2: output file
# NOTE(review): currently unused -- its only call site in pdf2img is
# commented out.
any2img() {
    convert -density 150 "$1" -quality 100 "$2" &>/dev/null
}
# Split a single PDF into per-page TIFF images in the current directory.
# $1: path to the .pdf file (consumed; the per-page PDFs are deleted)
# Relies on "stapler burst" creating files named "<base>_<page>.pdf".
pdf2img() {
    echo "Splitting single pdf file by pages (tiff)"
    stapler burst "$1"
    base=${1%.*}
    for i in "${base}_"*.pdf
    do
        out=pg${i#"$base"} # will result in 'pg_123.pdf'
        out=${out%.*}.tiff # replace extension
        echo "$out"
        # any2img "$i" "$out"
        convert -density 300 "$i" -compress lzw "$out"
        rm -f "$i"
    done
}
# Split a single DjVu document into per-page TIFF images (pg_NNN.tiff).
# $1: path to the .djvu file
djvu2img() {
    echo "Splitting single djvu file by pages (tiff)"
    # number of pages, as reported by djvused's "n" command
    # (modernized: $(...) instead of backticks, expansions quoted)
    pages=$(djvused -e "n" "$1")
    for (( i=1; i<=pages; i++ ))
    do
        num=$(printf "%03d" "$i")
        out="pg_$num.tiff"
        echo " $out"
        ddjvu -page="$i" -format=tiff "$1" "$out"
    done
}
# resolve the input file and derive its name parts
path=$(realpath "$1")
filename=$(basename "$path")
extension=${filename##*.}
basename=${filename%.*} # filename without extension

# create directory for extracted images; work inside it on a copy
mkdir -p "$basename"
cp "$path" "$basename"
cd "$basename"

# dispatch on the input type; the copied input is removed afterwards
if [[ "$extension" == "pdf" ]]; then
    pdf2img "$filename"
    rm -f "$filename"
elif [[ "$extension" == "djvu" ]]; then
    djvu2img "$filename"
    rm -f "$filename"
else
    echo "Supported file types: pdf, djvu"
    exit 1
fi
================================================
FILE: perm.sh
================================================
#!/bin/bash

# Normalize permissions below a directory: 0644 for files, 0755 for dirs.
# FIX: the declared defaults ($opt = '-h', $dir = '.') were never used --
# the case and the find commands read "$1"/"$2" directly, so calling e.g.
# "perm.sh -d" without a directory ran `find ""`. Use $opt/$dir instead.
opt=${1:-'-h'}
dir=${2:-'.'}
fmode=0644
dmode=0755

case "$opt" in
    -a) # dirs and files
        find "$dir" -type d -exec chmod $dmode "{}" +
        find "$dir" -type f -exec chmod $fmode "{}" +
        ;;
    -d)
        find "$dir" -type d -exec chmod $dmode "{}" +
        ;;
    -f)
        find "$dir" -type f -exec chmod $fmode "{}" +
        ;;
    *)
        printf "Usage: $(basename $0) option [directory]
-a \t set permissions of files and directories to $fmode, resp. $dmode.
-d \t set permissions of directories to $dmode.
-f \t set permissions of files to $fmode.
-h \t print this help.
"
        ;;
esac
================================================
FILE: pythonscripts/__init__.py
================================================
#!/usr/bin/env python
import os
import sys

# hack - enable importing from _this_ directory
sys.path.append(os.path.dirname(__file__))

# re-export the sibling modules' public names so users can simply do
# "from pythonscripts import ..."
from misc import *
from tempfiles import *
from terminal import *
================================================
FILE: pythonscripts/cpu.py
================================================
#! /usr/bin/env python3
def cores_count():
    """Return the number of CPU cores reported by /proc/cpuinfo.

    Parses the first parseable "cpu cores" line; returns 1 when the file
    is missing (non-Linux) or contains no such line.

    Fixes vs. the original: the file is closed via a context manager and
    a missing /proc/cpuinfo no longer raises.
    """
    try:
        with open("/proc/cpuinfo") as f:
            for line in f:
                if line.startswith("cpu cores"):
                    try:
                        _, n = line.split(":")
                        return int(n.strip())
                    except ValueError:
                        # malformed line -- keep scanning
                        continue
    except OSError:
        pass  # /proc not available -- fall through to the default
    return 1
================================================
FILE: pythonscripts/daemon.py
================================================
#! /usr/bin/env python
import os
def spawnDaemon(*args, detach_fds=True):
    """Spawn a completely detached subprocess (i.e., a daemon).

    Double-forks so that the grandchild is re-parented to init and is not
    a session leader, then exec's ``args`` as the daemon's command line.

    :param args: program and its arguments for :func:`os.execvp`
                 (``args[0]`` is the executable, looked up in PATH)
    :param detach_fds: when True, close all inherited file descriptors in
                       the daemon and redirect fds 0/1/2 to ``os.devnull``
    :raises ValueError: when no arguments are supplied
    :raises RuntimeError: when either fork fails

    E.g. for mark:
        spawnDaemon("../bin/producenotify.py", "producenotify.py", "xx")
    """
    if len(args) == 0:
        raise ValueError("no arguments supplied")

    # fork the first time (to make a non-session-leader child process)
    try:
        pid = os.fork()
    except OSError as e:
        raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno))
    if pid != 0:
        # parent (calling) process is all done
        return

    # detach from controlling terminal (to make child a session-leader)
    os.setsid()

    try:
        pid = os.fork()
    except OSError as e:
        # FIX: the original had a second, unreachable `raise Exception`
        # statement after this raise -- removed
        raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno))
    if pid != 0:
        # child process is all done
        os._exit(0)

    if detach_fds:
        # grandchild process now non-session-leader, detached from parent
        # grandchild process must now close all open files
        try:
            maxfd = os.sysconf("SC_OPEN_MAX")
        except (AttributeError, ValueError):
            maxfd = 1024
        for fd in range(maxfd):
            try:
                os.close(fd)
            except OSError:
                pass  # fd wasn't open to begin with (ignored)
        # redirect stdin, stdout and stderr to /dev/null
        # FIX: the original referenced an undefined name REDIRECT_TO here
        os.open(os.devnull, os.O_RDWR)  # becomes fd 0 (standard input)
        os.dup2(0, 1)
        os.dup2(0, 2)

    # and finally let's execute the executable for the daemon!
    try:
        os.execvp(args[0], args)
    except Exception:
        # oops, we're cut off from the world, let's just give up
        os._exit(255)
================================================
FILE: pythonscripts/ffparser.py
================================================
#!/usr/bin/env python
import json
import subprocess
import shlex
from pprint import pprint
# base ffprobe command line; the (shell-quoted) file path is appended in
# FFprobeParser.__init__ -- note the trailing space
ffprobe = "ffprobe -v quiet -print_format json -show_format -show_streams "
class FFprobeParser:
    """Parses ffprobe's JSON output for one media file.

    Exposes the "format" section and the first audio and video streams
    (each is a dict, or None when the file has no such stream).
    """

    def __init__(self, path):
        """Run ffprobe on 'path' and parse its JSON output.

        :raises subprocess.CalledProcessError: when ffprobe fails
        """
        self.data = json.loads(subprocess.check_output(ffprobe + shlex.quote(path), shell=True, universal_newlines=True))
        self.format = self.data["format"]
        self.audio = None
        self.video = None
        for stream in self.data["streams"]:
            if self.audio is None and stream["codec_type"] == "audio":
                self.audio = stream
            if self.video is None and stream["codec_type"] == "video":
                self.video = stream

    def _get(self, option, attribute):
        # raw attribute lookup; raises when the section or key is absent
        return getattr(self, option)[attribute]

    def _getBitrate(self, option):
        """Bit rate of the given part ("audio", "video" or "format"),
        falling back to (total - other stream) for streams that do not
        report their own bit_rate; None when it cannot be determined.

        Fixes vs. the original: narrow exception handling instead of bare
        `except:`, and no unbounded audio<->video recursion when neither
        stream reports a bit_rate (now returns None).
        """
        if option == "format":
            try:
                return int(self._get("format", "bit_rate"))
            except (AttributeError, KeyError, TypeError, ValueError):
                return None
        if option not in ("audio", "video"):
            return None
        try:
            return int(self._get(option, "bit_rate"))
        except (AttributeError, KeyError, TypeError, ValueError):
            pass
        # fall back to the container's total minus the other stream's own rate
        other = "video" if option == "audio" else "audio"
        total = self._getBitrate("format")
        try:
            other_rate = int(self._get(other, "bit_rate"))
        except (AttributeError, KeyError, TypeError, ValueError):
            return None
        if total is None:
            return None
        return total - other_rate

    def get(self, option, attribute):
        """ 'option' is one of "audio", "video", "format"
            'attribute' is the json attribute to query

        Returns None when the attribute is not available.
        """
        if attribute == "bit_rate":
            return self._getBitrate(option)
        try:
            return self._get(option, attribute)
        except (AttributeError, KeyError, TypeError):
            return None

    def pprint(self, option):
        """ 'option' is one of "audio", "video", "format",
            otherwise 'self.data' is printed
        """
        pprint(getattr(self, option, self.data))
FILE: pythonscripts/logger.py
================================================
#! /usr/bin/env python
"""
Simple logger object. Log level is integer for easy comparison.
"""
import sys
class Logger:
    """Minimal console logger with an integer level for cheap comparison
    (1=error, 2=warning, 3=info, 4=debug).

    When 'filename' is set, it prefixes debug/info/warning messages.
    """

    def __init__(self, log_level, prog_name):
        self.log_level = log_level  # messages above this level are dropped
        self.prog_name = prog_name  # tag used for error output
        self.filename = None        # optional per-file context

    def prefix(self, msg):
        """Return msg prefixed with the current filename, when set."""
        return msg if self.filename is None else "%s: %s" % (self.filename, msg)

    def _emit(self, threshold, msg):
        # shared stdout path for debug/info/warning
        if self.log_level >= threshold:
            print(self.prefix(msg))

    def debug(self, msg):
        self._emit(4, msg)

    def info(self, msg):
        self._emit(3, msg)

    def warning(self, msg):
        self._emit(2, "WARNING: %s" % msg)

    def error(self, msg):
        # errors go to stderr, tagged with the program name
        if self.log_level >= 1:
            sys.stderr.write("%s: %s\n" % (self.prog_name, msg))

    def critical(self, msg, retval=1):
        """Log msg as an error and terminate with exit status retval."""
        self.error(msg)
        sys.exit(retval)
================================================
FILE: pythonscripts/misc.py
================================================
#! /usr/bin/env python
"""
Human-readable file size. Algorithm does not use a for-loop. It has constant
complexity, O(1), and is in theory more efficient than algorithms using a for-loop.
Original source code from:
http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
from math import log
# (unit name, decimal places) per power of 1024, in two verbosities
unit_list = {
    "long": list(zip(['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'], [0, 0, 1, 2, 2, 2])),
    "short": list(zip(['B', 'K', 'M', 'G', 'T', 'P'], [0, 0, 1, 2, 2, 2])),
}

def format_sizeof(num, unit_format="long"):
    """Format a byte count for humans, scaled by powers of 1024.

    unit_format selects the "long" (KiB, MiB, ...) or "short" (K, M, ...)
    unit names.
    """
    if num <= 1:
        # nothing to scale
        return str(int(num)) + " B"
    scale = min(int(log(num, 1024)), len(unit_list[unit_format]) - 1)
    unit, decimals = unit_list[unit_format][scale]
    value = float(num) / 1024 ** scale
    return ('{:.%sf} {}' % decimals).format(value, unit)
"""
Nice time format, useful for ETA etc. Output is never longer than 6 characters.
"""
def format_time(seconds):
    """Render a duration compactly (at most 6 characters): weeks, then
    days+hours, hours+minutes, minutes+seconds, or bare seconds."""
    weeks, rest = divmod(seconds, 3600 * 24 * 7)
    days, rest = divmod(rest, 3600 * 24)
    hours, rest = divmod(rest, 3600)
    minutes, secs = divmod(rest, 60)
    if weeks > 0:
        return "%dw" % weeks
    if days > 0:
        return "%dd%02dh" % (days, hours)
    if hours > 0:
        return "%02dh%02dm" % (hours, minutes)
    if minutes > 0:
        return "%02dm%02ds" % (minutes, secs)
    return str(secs)
"""
Get content of any readable text file.
"""
def cat(fname):
    """Return the stripped text content of 'fname', or None when the file
    cannot be opened, read or decoded.

    Fixes vs. the original: the file handle is closed via a context
    manager and the bare `except:` is narrowed to the I/O failures this
    function is meant to swallow.
    """
    try:
        with open(fname, "r") as f:
            return f.read().strip()
    except (OSError, UnicodeDecodeError):
        return None
"""
Returns a string of at most `max_length` characters, cutting
only at word-boundaries. If the string was truncated, `suffix`
will be appended.
"""
import re
def smart_truncate(text, max_length=100, suffix='...'):
    """Return 'text' limited to 'max_length' characters, cutting only at
    a word boundary; 'suffix' is appended when truncation happened."""
    if len(text) <= max_length:
        return text
    # keep at most (max_length - len(suffix) - 1) chars plus one non-space,
    # i.e. end the kept part on a whole word
    pattern = r'^(.{0,%d}\S)\s.*' % (max_length - len(suffix) - 1)
    return re.sub(pattern, r'\1' + suffix, text)
"""
Recursive directory creation function (like 'mkdir -p' in linux).
"""
import os
def mkdir(path):
    """Create 'path' including missing parents (like 'mkdir -p'); an
    already-existing path is not an error.

    Any other OSError (e.g. permission denied) is re-raised.
    """
    import errno
    try:
        os.makedirs(path)
    except OSError as e:
        # EEXIST: the path is already there -- that is fine
        # (named constant instead of the magic number 17)
        if e.errno != errno.EEXIST:
            raise
================================================
FILE: pythonscripts/tempfiles.py
================================================
#! /usr/bin/env python
"""
Create temporary file, close file descriptor and return full path of the file.
"""
import os
import tempfile
import atexit
class TempFiles:
    """Tracks temporary files created via getTempFileName() and removes
    any that still exist when the interpreter exits."""

    def __init__(self):
        self.tempFiles = []  # paths handed out and not yet removed
        atexit.register(self.removeAll)

    def removeAll(self):
        """Remove every tracked temporary file."""
        for path in self.tempFiles[:]:
            self.remove(path)

    def remove(self, path):
        """Forget 'path' and delete it from disk if it still exists.

        Fix vs. the original: a tracked file that was already deleted by
        someone else is now dropped from the tracking list as well,
        instead of lingering there forever.
        """
        if path in self.tempFiles:
            self.tempFiles.remove(path)
            if os.path.exists(path):
                os.remove(path)

    def getTempFileName(self, prefix="tmp", suffix="", dir=None, text=False):
        """Create a temporary file, close its descriptor and return the
        full path; the file is tracked for automatic removal at exit."""
        fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir, text=text)
        os.close(fd)
        self.tempFiles.append(path)
        return path
================================================
FILE: pythonscripts/terminal.py
================================================
#! /usr/bin/env python
"""
Linux terminal colors.
"""
#import sys
# ANSI SGR foreground codes by color name
COLORS = {"black":30, "red":31, "green":32, "yellow":33, "blue":34, "magenta":35, "cyan":36, "white":37}

def colorize(color, text):
    """Wrap 'text' in ANSI escapes rendering it bold in the named color.

    Raises KeyError for an unknown color name. Escapes are emitted
    unconditionally, even when stdout is not a terminal.
    """
    return "\033[1;%im%s\033[0m" % (COLORS[color], text)
def getColor(status, download_speed=0):
    """Map a download status string to a color name ('' when unknown).

    "active" is blue while data is actually flowing, yellow when stalled.
    """
    if status == "active":
        return "blue" if download_speed > 0 else "yellow"
    return {
        "error": "red",
        "complete": "green",
        "paused": "cyan",
        "waiting": "magenta",
    }.get(status, "")
"""
Get size of unix terminal as tuple (width, height).
When all fails, default value is (80, 25).
Original source code from:
http://stackoverflow.com/a/566752
"""
def getTerminalSize():
    """Return the size of the terminal as (width, height).

    Tries, in order: the TIOCGWINSZ ioctl on stdin/stdout/stderr, the
    same ioctl on the controlling tty, and the LINES/COLUMNS environment
    variables. Falls back to (80, 25) when everything fails.
    """
    import os
    env = os.environ
    def ioctl_GWINSZ(fd):
        # ask the kernel for this fd's terminal window size;
        # returns (rows, cols) or None when fd is not a tty
        try:
            import fcntl, termios, struct, os
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except:
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdin/stdout/stderr may all be redirected -- query the
        # controlling terminal directly
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        # last resort: environment variables, else the classic 80x25
        try:
            cr = (env['LINES'], env['COLUMNS'])
        except:
            cr = (25, 80)
    # cr is (rows, cols); swap to (width, height)
    return int(cr[1]), int(cr[0])
================================================
FILE: qemu-launcher.sh
================================================
#! /usr/bin/bash
# Author: Jakub Klinkovský (Lahwaacz)
# https://github.com/lahwaacz
# Print a one-line usage summary for this launcher.
function print_usage() {
    echo "usage: $0 <VM name>"
}
## Generate name of TAP interface to create
function get_tap_name() {
for (( i=0; i<$tap_limit; i++ )); do
local name="tap$i"
if [[ ! -d "/sys/class/net/$name" ]]; then
echo "$name"
break
fi
done
}
# do not run as root
if [[ $EUID -eq 0 ]]; then
    echo "This script is not supposed to be run as root." >&2
    exit 1
fi

# parse command line arguments
if [[ -z $1 ]]; then
    print_usage
    exit 1
else
    vm_name="$1"
fi

# sudo prompt used by the qemu-tap-helper.sh invocations below
sudo_args=("-Ap" "Enter your root password (QEMU launcher script)")
username=$(whoami)
tap_limit=10 # maximum number of TAP interfaces created by this script
tap_nic=$(get_tap_name)
br_nic="br0-qemu" # bridge interface name (will be created)
wan_nic="wlan0" # WAN interface name (for NAT)

# per-VM launch: bring TAP/bridge networking up (via sudo), run qemu in
# the foreground, then tear the networking down again
case "$vm_name" in
    btrfs)
        sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" up
        qemu-system-x86_64 \
            -name "$vm_name" \
            -monitor stdio \
            -enable-kvm -smp 2 -cpu host -m 1024 \
            -vga qxl -spice port=5931,disable-ticketing \
            -drive file="/home/lahwaacz/virtual_machines/archlinux-btrfs.raw",if=virtio,cache=none -boot once=c \
            -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py "$vm_name") -net tap,ifname="$tap_nic",script=no,downscript=no,vhost=on \
            -usbdevice tablet
        sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" down
        ;;
    virtarch)
        sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" up
        qemu-system-x86_64 \
            -name "$vm_name" \
            -monitor stdio \
            -enable-kvm -smp 2 -cpu host -m 1024 \
            -vga qxl -spice port=5931,disable-ticketing \
            -drive file="/home/lahwaacz/virtual_machines/archlinux.raw",if=virtio,cache=none -boot once=c \
            -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py "$vm_name") -net tap,ifname="$tap_nic",script=no,downscript=no,vhost=on \
            -usbdevice tablet
        sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" down
        ;;
    winxp)
        sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" up
        qemu-system-i386 \
            -name "$vm_name" \
            -monitor stdio \
            -enable-kvm -smp 2 -cpu host -m 1024 \
            -vga qxl -spice port=5930,disable-ticketing \
            -drive file="/home/lahwaacz/virtual_machines/winxp.raw",if=virtio,cache=none -boot order=c \
            -net nic,model=virtio,macaddr=$(qemu-mac-hasher.py "$vm_name") -net tap,ifname="$tap_nic",script=no,downscript=no,vhost=on \
            -usbdevice tablet \
            -soundhw ac97 \
            -localtime
        sudo "${sudo_args[@]}" qemu-tap-helper.sh "$username" "$tap_nic" "$br_nic" "$wan_nic" down
        ;;
    # the live-ISO targets use user-mode networking, so no TAP/bridge setup
    liveiso)
        if [[ -z "$2" ]]; then
            echo "You must specify the ISO file as a second argument." >&2
            exit 1
        fi
        qemu-system-x86_64 \
            -name "$vm_name" \
            -monitor stdio \
            -enable-kvm -smp 2 -cpu host -m 1024 \
            -vga virtio \
            -display gtk,gl=on \
            -drive file="$2",if=virtio,media=cdrom -boot once=d \
            -net nic -net user \
            -usbdevice tablet
        ;;
    liveiso-efi)
        if [[ -z "$2" ]]; then
            echo "You must specify the ISO file as a second argument." >&2
            exit 1
        fi
        if [[ ! -e "/usr/share/ovmf/x64/OVMF_CODE.fd" ]]; then
            echo "File /usr/share/ovmf/x64/OVMF_CODE.fd does not exist. Is the package ovmf installed?" >&2
            exit 1
        fi
        qemu-system-x86_64 \
            -bios /usr/share/ovmf/x64/OVMF_CODE.fd \
            -name "$vm_name" \
            -monitor stdio \
            -enable-kvm -smp 2 -cpu host -m 1024 \
            -vga virtio \
            -display gtk,gl=on \
            -drive file="$2",if=virtio,media=cdrom -boot once=d \
            -net nic -net user \
            -usbdevice tablet
        ;;
    *)
        echo "Unknown VM name specified: $vm_name" >&2
        exit 1
        ;;
esac

### frequently/previously used options:
## user-mode networking
# -net nic,model=virtio -net user
## user-mode networking with redirect (localhost:2222 -> 10.0.2.15:22)
# -net nic,model=virtio -net user -redir tcp:2222:10.0.2.15:22
================================================
FILE: qemu-mac-hasher.py
================================================
#!/usr/bin/env python
# Author: Jakub Klinkovský (Lahwaacz)
# https://github.com/lahwaacz

import sys
import zlib


def vm_mac_address(name):
    """Derive a stable MAC address (52:54:xx:xx:xx:xx) from a VM name.

    FIX: the CRC32 is now formatted with zero-padding to exactly 8 hex
    digits; the original used hex() without padding, which crashed with
    "not enough arguments for format string" whenever the CRC was below
    0x10000000.
    """
    # mask to get a stable unsigned 32-bit value across Python versions
    crc = zlib.crc32(name.encode("utf-8")) & 0xffffffff
    digits = format(crc, "08x")
    # 52:54 is the conventional locally-administered QEMU prefix
    return "52:54:%s%s:%s%s:%s%s:%s%s" % tuple(digits)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("usage: %s <VM Name>" % sys.argv[0])
        sys.exit(1)
    print(vm_mac_address(sys.argv[1]))
================================================
FILE: qemu-tap-helper.sh
================================================
#! /usr/bin/bash
# Author: Jakub Klinkovský (Lahwaacz)
# https://github.com/lahwaacz
########## Functions ##########
## Check if a string represents a network interface
# $1: potential interface name
## Check if a string represents a network interface
# $1: potential interface name
# Succeeds iff the kernel exposes the interface under /sys/class/net.
function is_interface() {
    [[ -d "/sys/class/net/$1" ]]
}
## Create new TAP interface
# $1: name of the interface to create
## Create new TAP interface (no-op when it already exists)
# $1: name of the interface to create
# Uses the global $username as the owner of the TAP device.
function create_tap() {
    if ! is_interface "$1"; then
        echo "Creating TAP interface '$1'"
        ip tuntap add "$1" mode tap user "$username"
        ip link set dev "$1" up
    fi
}
## Delete TAP interface
# $1: name of the interface to delete
## Delete TAP interface
# $1: name of the interface to delete (brought down first)
function del_tap() {
    echo "Deleting TAP interface '$1'"
    ip link set dev "$1" down
    ip tuntap del "$1" mode tap
}
## Check if the bridge has any interface
# $1: bridge interface name
## Check if the bridge has any interface
# $1: bridge interface name
# Succeeds when /sys/class/net/$1/brif lists no enslaved interfaces.
function bridge_is_empty() {
    [[ $(ls "/sys/class/net/$1/brif" | wc -w) == "0" ]]
}
## Create bridge interface if it does not exist
# $1: bridge interface name
## Create bridge interface if it does not exist, and start NAT for it
# $1: bridge interface name
# Aborts when the name exists but is not a bridge. Uses the global
# $wan_nic when starting NAT via nat-launch.sh.
function create_br() {
    if is_interface "$1"; then
        # a bridge is recognizable by its brif directory in sysfs
        if [[ ! -d "/sys/class/net/$1/brif" ]]; then
            echo "Interface '$1' already exists and is not a bridge"
            exit 1
        fi
    else
        echo "Creating bridge interface '$1'"
        ip link add name "$1" type bridge
        ip link set dev "$1" up
        # Xyne's excellent script to launch NAT
        echo "Starting NAT"
        nat-launch.sh "$wan_nic" "$1" up
    fi
}
## Delete bridge interface if it exists and has no interface
# $1: bridge interface name
## Delete bridge interface if it exists and has no enslaved interface,
## stopping the NAT that create_br started
# $1: bridge interface name
function del_br() {
    if bridge_is_empty "$1"; then
        # Xyne's excellent script to launch NAT
        echo "Stopping NAT"
        nat-launch.sh "$wan_nic" "$1" down
        echo "Deleting bridge interface '$1'"
        ip link set dev "$1" down
        ip link delete "$1" type bridge
    fi
}
## Add interface to the bridge
# $1: bridge interface name
# $2: name of the interface to add
## Add interface to the bridge
# $1: bridge interface name
# $2: name of the interface to add
# The interface gets promiscuous mode and loses all of its own
# addresses -- the bridge owns the addressing from now on.
function br_add_iface() {
    echo "Adding interface '$2' to bridge '$1'"
    ip link set dev "$2" promisc on up
    ip addr flush dev "$2" scope host &>/dev/null
    ip addr flush dev "$2" scope site &>/dev/null
    ip addr flush dev "$2" scope global &>/dev/null
    ip link set dev "$2" master "$1"
    # skip forwarding delay (state 3 = forwarding)
    bridge link set dev "$2" state 3
}
## Remove interface from the bridge
# $1: bridge interface name
# $2: name of the interface to remove
## Remove interface from the bridge
# $1: bridge interface name
# $2: name of the interface to remove
function br_rm_iface() {
    echo "Removing interface '$2' from bridge '$1'"
    ip link set "$2" promisc off down
    ip link set dev "$2" nomaster
}
########## Main ###############
function print_qemu_tap_helper_usage() {
    echo "usage: $0 <username> <TAP interface> <bridge interface> <WAN interface> <up|down>"
    echo " <TAP interface> and <bridge interface> will be created,"
    echo " NAT from <WAN interface> to <bridge interface> will be set up"
}

# interface/iptables manipulation requires root
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root." >&2
    exit 1
fi

# NOTE(review): this checks $4 although the script takes 5 arguments; a
# missing $5 still ends in the usage message via the case's *) branch.
if [[ -z $4 ]]; then
    print_qemu_tap_helper_usage
    exit 1
else
    username="$1"
    tap_nic="$2"
    br_nic="$3"
    wan_nic="$4"
    action="$5"
fi

# exit on errors
set -e

case "$action" in
    up)
        # bridge first, then the TAP device enslaved to it
        create_br "$br_nic"
        create_tap "$tap_nic"
        br_add_iface "$br_nic" "$tap_nic"
        ;;
    down)
        # tear down in reverse order; del_br only acts on an empty bridge
        br_rm_iface "$br_nic" "$tap_nic"
        del_tap "$tap_nic"
        del_br "$br_nic"
        ;;
    *)
        print_qemu_tap_helper_usage
        exit 1
        ;;
esac
================================================
FILE: remove-dead-symlinks.sh
================================================
#! /bin/bash

# Recursively delete symlinks whose target no longer exists.
shopt -s globstar
# non-recursive version: 'for link in *'
for link in **/*
do
    # only symlinks are of interest
    [ -h "$link" ] || continue
    dest=$(readlink -fn "$link")
    if [ ! -e "$dest" ]
    then
        # print the dead link, then remove it
        echo "$link"
        rm "$link"
    fi
done
================================================
FILE: replaygain.py
================================================
#! /usr/bin/env python3
import sys
import os
import argparse
import subprocess
import asyncio
from concurrent.futures import ThreadPoolExecutor
import taglib
from pythonscripts.cpu import cores_count
from pythonscripts.logger import Logger
class ReplayGain:
    """Computes and writes ReplayGain ID3 tags for a set of mp3 files,
    treating all of them as one album.
    """

    def __init__(self, logger, options, files):
        """
        :param logger: Logger instance used for all output
        :param options: parsed options; the force, force_album and
                        force_track attributes are read
        :param files: list of mp3 paths forming one album
        """
        # logger
        self.log = logger
        self.log.filename = None
        # internals
        self.raw_lines = []   # raw mp3gain output lines
        self.data_files = []
        self.data_album = {}
        # options
        self.force = options.force
        self.force_album = options.force_album
        self.force_track = options.force_track
        self.files = files

    def run(self):
        """Run mp3gain and write the resulting tags, unless every file
        already carries ReplayGain tags and no --force* flag is set."""
        # check if all files have ReplayGain tags; mp3gain runs very long
        if not (self.force or self.force_album or self.force_track) and self.all_files_have_replaygain_tags():
            self.log.error("All files already have ReplayGain tags, no action taken.")
            return
        if self.run_mp3gain():
            self.update_tags()

    def all_files_have_replaygain_tags(self):
        """Quick analysis to determine if ALL input files contain the
        four replaygain_* tags.

        FIX: the original returned after inspecting only the first file,
        so a partially tagged album was treated as fully tagged.
        """
        expected = set(["replaygain_track_gain", "replaygain_album_gain",
                        "replaygain_track_peak", "replaygain_album_peak"])
        for fname in self.files:
            # open id3 tag
            f = taglib.File(fname)
            tags = set(tag.lower() for tag in f.tags.keys() if tag.lower().startswith("replaygain_"))
            if tags != expected:
                return False
        return True

    def run_mp3gain(self):
        """Compute values for replaygain_* tags by running mp3gain.

        Returns True when its output was captured in self.raw_lines,
        False on any failure.

        FIX: the original's `return ret` inside `finally` silently
        swallowed every exception, including the one it re-raised.
        """
        self.log.debug("running mp3gain on specified files")
        cmd = ["mp3gain", "-q", "-o", "-s", "s"] + self.files
        try:
            raw_data = subprocess.check_output(cmd, universal_newlines=True)
        except subprocess.CalledProcessError as exc:
            msg = "mp3gain returned error status: " + str(exc.returncode) + "\n"
            msg += "-----------mp3gain output dump-----------\n"
            msg += exc.output
            msg += "\n-----------------------------------------\n"
            self.log.error(msg)
            return False
        except Exception as e:
            # any other failure (e.g. mp3gain not installed); the original
            # printed the exception and effectively returned False
            print(e)
            return False
        self.raw_lines = raw_data.splitlines()
        return True

    def update_tags(self):
        """Parse mp3gain's tab-separated output and write the
        replaygain_* tags into every file."""
        self.log.debug("parsing mp3gain output")
        album_parts = self.raw_lines[-1].strip().split("\t")
        # just in case
        if album_parts[0] != '"Album"':
            self.log.error("unable to parse mp3gain output")
            return
        a_gain = float(album_parts[2])            # album gain
        a_peak = float(album_parts[3]) / 32768.0  # album peak
        del self.raw_lines[0]   # header
        del self.raw_lines[-1]  # album summary
        for line in self.raw_lines:
            parts = line.strip().split("\t")
            fname = parts[0]  # filename
            self.log.filename = fname
            self.log.debug("begin processing file")
            t_gain = float(parts[2])            # track gain
            t_peak = float(parts[3]) / 32768.0  # track peak
            # set t_gain, t_peak, a_gain, a_peak depending on options
            if self.force_album:
                t_gain = a_gain
                t_peak = a_peak
            elif self.force_track:
                a_gain = t_gain
                a_peak = t_peak
            # open id3 tag
            f = taglib.File(fname)
            # update tag
            f.tags["REPLAYGAIN_TRACK_GAIN"] = "%.2f dB" % t_gain
            f.tags["REPLAYGAIN_ALBUM_GAIN"] = "%.2f dB" % a_gain
            f.tags["REPLAYGAIN_TRACK_PEAK"] = "%.6f" % t_peak
            f.tags["REPLAYGAIN_ALBUM_PEAK"] = "%.6f" % a_peak
            # save tag
            self.log.debug("saving modified ID3 tag")
            f.save()
            self.log.debug("done processing file")
        self.log.filename = None
class Main:
    """Parses the input paths and runs one ReplayGain worker per album
    directory found."""

    def __init__(self, logger, options):
        self.logger = logger
        self.options = options
        self.recursive = options.recursive
        self.paths = options.files
        del options.recursive  # don't want to pass it to ReplayGain object
        del options.files      # don't want to pass it to ReplayGain object

    async def run(self):
        """Process every queued album, workers running concurrently."""
        # We could use the default single-threaded executor with basically the same performance
        # (because of Python's GIL), but the ThreadPoolExecutor allows to limit the maximum number
        # of workers and thus the maximum number of concurrent subprocesses.
        with ThreadPoolExecutor(max_workers=cores_count()) as executor:
            loop = asyncio.get_event_loop()
            tasks = [
                loop.run_in_executor(executor, self.worker, path)
                for path in self.queue_generator()
            ]
            # FIX: the results were iterated into a no-op loop; awaiting
            # the gather directly is equivalent and clearer
            await asyncio.gather(*tasks)

    def worker(self, paths):
        """Run ReplayGain over one album (an iterable of mp3 paths)."""
        paths = sorted(list(paths))
        # skip dirs not containing any mp3 file
        if len(paths) == 0:
            return
        # write info (FIX: corrected "Procesing" typo)
        print("Processing:")
        for path in paths:
            print(" " + path)
        try:
            # create ReplayGain object, pass files and run
            rg = ReplayGain(self.logger, self.options, paths)
            rg.run()
        except Exception as e:
            print(e, file=sys.stderr)
            raise

    def queue_generator(self):
        """ For each directory in self.files returns generator returning full paths to mp3 files in that folder.
            If self.files contains file paths instead of directory, it's returned as [file].
        """
        def walk(root):
            dirs = []
            files = []
            for entry in os.scandir(root):
                if entry.is_dir():
                    dirs.append(entry.name)
                elif entry.is_file() and entry.name.endswith(".mp3"):
                    files.append(entry.name)
            # first yield found files, then recurse into subdirs
            if files:
                yield (os.path.join(root, x) for x in files)
            if self.recursive:
                for d in dirs:  # recurse into subdir
                    for x in walk(os.path.join(root, d)):
                        yield x
        for path in self.paths:
            if os.path.isdir(path):
                for x in walk(path):
                    yield x
            else:
                yield [path]
def main(prog_name, options):
    """Set up logging and run the asynchronous driver over the parsed options."""
    logger = Logger(options.log_level, prog_name)
    logger.debug("Selected mp3 files:")
    logger.debug("\n".join(sorted(options.files)))
    # construct and run the driver in one go
    asyncio.run(Main(logger, options).run())
def argparse_path_handler(path):
    """argparse type handler: accept an existing path; plain files must be mp3.

    Returns the absolute form of the path; raises ArgumentTypeError otherwise.
    """
    path_exists = os.path.exists(path)
    if not path_exists:
        raise argparse.ArgumentTypeError("invalid path: '%s'" % path)
    is_non_mp3_file = os.path.isfile(path) and not path.endswith(".mp3")
    if is_non_mp3_file:
        raise argparse.ArgumentTypeError("not a mp3 file: '%s'" % path)
    return os.path.abspath(path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Write correct ReplayGain tags into mp3 files; uses mp3gain internally")
    # verbosity options are mutually exclusive
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument("-q", "--quiet", dest="log_level", action="store_const", const=0, default=1, help="do not output error messages")
    verbosity.add_argument("-v", "--verbose", dest="log_level", action="store_const", const=3, help="output warnings and informational messages")
    verbosity.add_argument("-d", "--debug", dest="log_level", action="store_const", const=4, help="output debug messages")
    parser.add_argument("-r", "--recursive", action="store_true", help="when path to directory is specified, browse it recursively (albums still respected)")
    parser.add_argument("--force", action="store_true", help="force overwriting of existing ID3v2 ReplayGain tags")
    # the album/track override options are mutually exclusive
    override = parser.add_mutually_exclusive_group()
    override.add_argument("--force-album", action="store_true", help="write replaygain_album_{gain,peak} values into replaygain_track_{gain,peak} tags")
    override.add_argument("--force-track", action="store_true", help="write replaygain_track_{gain,peak} values into replaygain_album_{gain,peak} tags")
    parser.add_argument("files", nargs="+", metavar="FILE | FOLDER", type=argparse_path_handler, help="path to mp3 file(s) or directory(ies)")
    main(sys.argv[0], parser.parse_args())
================================================
FILE: rexe
================================================
#!/bin/bash

# rexe: upload a local directory to a remote host over rsync, run a command
# there via ssh, and synchronize the results back.

set -e

# global state filled in by the option parsing below
HOST=""                 # remote host, [user@]hostname (first positional argument)
LOCAL_PATH=""           # local directory to upload (-p/--path, defaults to $PWD)
REMOTE_PATH=""          # remote base directory (from host:path syntax, defaults to /tmp)
REXE_DIR="rexe"         # name of the rexe working directory on the remote host
CMD=""                  # remote command, accumulated from the remaining arguments
DOWNLOAD="true"         # whether to rsync results back afterwards (--no-download disables)
EXCLUDE=()              # rsync --exclude options applied to both upload and download
EXCLUDE_DOWNLOAD=()     # rsync --exclude options applied to the download only
# Consume one positional argument: the first one selects the remote host
# (optionally "host:path" to also set the remote base directory); every
# following one is appended to the remote command string.
function handle_argument()
{
    if [[ -z "$HOST" ]]; then
        case "$1" in
            *:*)
                HOST="${1%:*}"
                REMOTE_PATH="${1#*:}"
                ;;
            *)
                HOST="$1"
                ;;
        esac
        return
    fi
    # accumulate the remote command word by word
    if [[ -z "$CMD" ]]; then
        CMD="$1"
    else
        CMD+=" $1"
    fi
}
# Parse the command line. Options are recognized only before the remote
# command starts; once CMD is non-empty, every remaining argument belongs
# to the remote command.
while [ "$#" -gt 0 ]; do
    if [[ "$CMD" == "" ]]; then
        case "$1" in
            --path=*) LOCAL_PATH="${1#*=}"; shift 1;;
            -p|--path) LOCAL_PATH="$2"; shift 2;;
            --no-download) DOWNLOAD="false"; shift 1;;
            # exclude patterns for both upload and download
            --exclude=*) EXCLUDE+=("--exclude" "${1#*=}"); shift 1;;
            --exclude) EXCLUDE+=("--exclude" "$2"); shift 2;;
            # exclude patterns applied only when downloading results
            --exclude-download=*) EXCLUDE_DOWNLOAD+=("--exclude" "${1#*=}"); shift 1;;
            --exclude-download) EXCLUDE_DOWNLOAD+=("--exclude" "$2"); shift 2;;
            -*) echo "unknown option: $1" >&2; exit 1;;
            *) handle_argument "$1"; shift 1;;
        esac
    else
        handle_argument "$1"
        shift 1
    fi
done
if [[ "$HOST" == "" ]]; then
    echo "error: remote host was not specified." >&2
    exit 1
fi
if [[ "$CMD" == "" ]]; then
    # typo fix: "sepcified" -> "specified"
    echo "error: remote command was not specified." >&2
    exit 1
fi

# fill in defaults
if [[ "$LOCAL_PATH" == "" ]]; then
    LOCAL_PATH=$(pwd)
fi
# NOTE: the tmpfs for $XDG_RUNTIME_DIR may be too small (e.g. only 10% of the available RAM)
#if [[ "$REMOTE_PATH" == "" ]]; then
#    REMOTE_PATH=$(ssh "$HOST" echo '$XDG_RUNTIME_DIR')
#fi
if [[ "$REMOTE_PATH" == "" ]]; then
    REMOTE_PATH="/tmp"
    # /tmp is shared between users, so isolate the directory per remote user
    REXE_DIR="rexe_$(ssh "$HOST" whoami)"
fi

if [[ ! -d "$LOCAL_PATH" ]]; then
    # grammar fix: "is does not exist" -> "does not exist"
    echo "error: local path '$LOCAL_PATH' does not exist or is not a directory." >&2
    exit 1
fi

# create remote main directory for rexe with restricted permissions
echo "Creating remote directory '$REMOTE_PATH/$REXE_DIR'..."
ssh "$HOST" mkdir -m 0700 -p "$REMOTE_PATH/$REXE_DIR"

# change remote path into full path
_basename=$(basename "$LOCAL_PATH")
REMOTE_PATH="$REMOTE_PATH/$REXE_DIR/$_basename"

echo "Uploading local directory '$LOCAL_PATH' to remote directory '$REMOTE_PATH'..."
# quote the array expansion so exclude patterns containing spaces survive word splitting
rsync -rlptD "$LOCAL_PATH/" "$HOST:$REMOTE_PATH/" -e ssh -zz --info=progress2 --delete "${EXCLUDE[@]}"

echo "Executing remote command '$CMD'..."
# ignore errors of the ssh command to always run rsync afterwards (even on keyboard interrupt)
set +e
ssh -t "$HOST" "cd ${REMOTE_PATH@Q}; bash --login -c -- ${CMD@Q}"
set -e

if [[ "$DOWNLOAD" != "false" ]]; then
    echo "Synchronizing remote directory '$REMOTE_PATH' into the local directory..."
    # FIXME: EXCLUDE_DOWNLOAD does not work correctly for wildcards
    # echo the full rsync command first so the user can see the applied excludes
    echo rsync -rlptD "$HOST:$REMOTE_PATH/" "$LOCAL_PATH/" -e ssh -zz --info=progress2 --delete "${EXCLUDE[@]}" "${EXCLUDE_DOWNLOAD[@]}" -v
    rsync -rlptD "$HOST:$REMOTE_PATH/" "$LOCAL_PATH/" -e ssh -zz --info=progress2 --delete "${EXCLUDE[@]}" "${EXCLUDE_DOWNLOAD[@]}" -v
fi
================================================
FILE: rmshit.py
================================================
#! /usr/bin/env python3
import os
import shutil
from pathlib import Path
import yaml
# Default contents of ~/.config/rmshit.yaml: a YAML list of files/directories
# considered junk. The "#" text inside the string are YAML comments explaining
# each entry; users customize the generated config file, not this literal.
DEFAULT_CONFIG = """
- ~/.adobe # Flash crap
- ~/.macromedia # Flash crap
- ~/.recently-used
- ~/.local/share/recently-used.xbel
- ~/.thumbnails
- ~/.gconfd
- ~/.gconf
- ~/.local/share/gegl-0.2
- ~/.FRD/log/app.log # FRD
- ~/.FRD/links.txt # FRD
- ~/.objectdb # FRD
- ~/.gstreamer-0.10
- ~/.pulse
- ~/.esd_auth
- ~/.config/enchant
- ~/.spicec # contains only log file; unconfigurable
- ~/.dropbox-dist
- ~/.parallel
- ~/.dbus
- ~/ca2 # WTF?
- ~/ca2~ # WTF?
- ~/.distlib/ # contains another empty dir, don't know which software creates it
- ~/.bazaar/ # bzr insists on creating files holding default values
- ~/.bzr.log
- ~/.nv/
- ~/.viminfo # configured to be moved to ~/.cache/vim/viminfo, but it is still sometimes created...
- ~/.npm/ # npm cache
- ~/.java/
- ~/.swt/
- ~/.oracle_jre_usage/
- ~/.openjfx/
- ~/.org.jabref.gui.JabRefMain/
- ~/.org.jabref.gui.MainApplication/
- ~/.jssc/
- ~/.tox/ # cache directory for tox
- ~/.pylint.d/
- ~/.qute_test/
- ~/.QtWebEngineProcess/
- ~/.qutebrowser/ # created empty, only with webengine backend
- ~/.asy/
- ~/.cmake/
- ~/.gnome/
- ~/unison.log
- ~/.texlive/
- ~/.w3m/
- ~/.subversion/
- ~/nvvp_workspace/ # created empty even when the path is set differently in nvvp
- ~/.ansible/
- ~/.fltk/
- ~/.vnc/
- ~/.local/share/Trash/ # VSCode puts deleted files here
"""
def get_size(path):
    """Return the size of a file in bytes, or the recursive total for a directory."""
    p = Path(path)
    if p.is_dir():
        total = 0
        for entry in p.rglob("*"):
            try:
                # lstat() does not follow symlinks: broken links previously
                # crashed stat() with FileNotFoundError
                total += entry.lstat().st_size
            except OSError:
                # entry vanished (or is otherwise unreadable) while scanning; skip it
                pass
        return total
    return p.stat().st_size
def read_config():
    """
    Reads the list of shitty files from a YAML config.

    On first run the config (and its parent directory) is created with the
    default content. Returns the parsed YAML: a list of path strings.
    """
    config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config/"))
    config_path = Path(config_dir) / "rmshit.yaml"
    # write default config if it does not exist
    if not config_path.exists():
        # make sure the config directory itself exists (e.g. on a fresh system)
        config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(config_path, "w") as f:
            print(DEFAULT_CONFIG.strip(), file=f)
    with open(config_path, "r") as f:
        return yaml.safe_load(f)
def yesno(question, default="n"):
    """
    Asks the user for YES or NO, always case insensitive.
    Returns True for YES and False for NO.

    An empty answer selects `default`; the prompt marks the default in brackets.
    """
    # reflect the actual default in the prompt instead of always showing "(y/[n])"
    hint = "([y]/n)" if default == "y" else "(y/[n])"
    prompt = f"{question} {hint} "
    ans = input(prompt).strip().lower()
    if not ans:
        ans = default
    return ans == "y"
def format_size(size_in_bytes):
    """Render a non-negative byte count as a short human-readable string."""
    if size_in_bytes <= 0:
        return "0 bytes"
    units = ("bytes", "KiB", "MiB", "GiB")
    # every 10 bits of magnitude corresponds to one 1024x unit step
    idx = (size_in_bytes.bit_length() - 1) // 10
    if idx >= len(units):
        idx = len(units) - 1
    value = size_in_bytes / (1024 ** idx)
    return f"{value:.4g} {units[idx]}"
def rmshit():
    """Interactively list the configured junk paths that exist and remove them on confirmation."""
    shittyfiles = read_config()
    print("Found shittyfiles:")
    found = []
    total_size = 0
    for f in shittyfiles:
        absf = os.path.expanduser(f)
        if os.path.exists(absf):
            found.append(absf)
            size = get_size(absf)
            total_size += size
            print(f" {f} ({format_size(size)})")
    if len(found) == 0:
        print("No shitty files found :)")
        return
    if yesno("Remove all?", default="n"):
        for f in found:
            # symlinks (even those pointing to directories) must be unlinked,
            # shutil.rmtree refuses to operate on them
            if os.path.isfile(f) or os.path.islink(f):
                os.remove(f)
            else:
                shutil.rmtree(f)
        print(f"All cleaned, {format_size(total_size)} freed.")
    else:
        print("No file removed")
# run the interactive cleanup only when executed as a script
if __name__ == "__main__":
    rmshit()
================================================
FILE: run-pvserver
================================================
#!/bin/bash

# run-pvserver: start a ParaView server (pvserver) on a remote host under a
# headless Xvfb display and forward its port back to localhost so a local
# ParaView client can connect to localhost:11111.

host="$1"
if [[ "$host" == "" ]]; then
    echo "usage: $0 [user@]hostname"
    exit 1
fi

# resolve the actual node name of the remote host for the tunnel target
hostname=$(ssh "$host" "uname -n")
port=11111     # default pvserver port
display=42     # X display number used for the Xvfb server

# NOTE: bash -lc is needed to get full $PATH by sourcing /etc/profile.d/*.sh
ssh -C -t -L "localhost:$port:$hostname:$port" "$host" "bash -lc 'xvfb-run --server-num=$display mpirun -np 2 pvserver --displays=:$display --server-port=$port'"
================================================
FILE: sway-sensible-terminal
================================================
#! /usr/bin/env python3

# Launch the preferred terminal emulator in the working directory of the
# currently focused sway/i3 window (read from the window title).

import os
import sys
import json
import subprocess

# colon-separated search path for the terminal binary
PATH = os.environ.get("PATH", "/usr/bin")
# terminal emulator to launch; defaults to alacritty
TERMINAL = os.environ.get("TERMINAL", "alacritty")
# extra command-line arguments forwarded to the terminal
ARGS = sys.argv[1:]
def get_cwd(tree):
    """Walk the sway/i3 layout tree and return the first existing absolute path
    found in the focused window's title, or None.

    Relies on the shell/terminal putting the working directory into the window
    title (e.g. "fish:/home/user") — colon-separated segments are checked.
    """
    for node in tree.get("nodes", []):
        if node.get("focused"):
            # the window title can be missing or null for some views
            name = node.get("name") or ""
            for part in name.split(":"):
                if part.startswith("/") and os.path.exists(part):
                    return part
        cwd = get_cwd(node)
        if cwd is not None:
            return cwd
# prefer swaymsg, fall back to i3-msg
if os.path.isfile("/usr/bin/swaymsg"):
    prog = "swaymsg"
elif os.path.isfile("/usr/bin/i3-msg"):
    prog = "i3-msg"
else:
    raise Exception("Neither swaymsg or i3-msg was found in /usr/bin/")

# query the window manager for the full layout tree as JSON
cmd = subprocess.run(f"{prog} -t get_tree", shell=True, check=True, capture_output=True)
tree = json.loads(cmd.stdout)

cwd = get_cwd(tree)
if cwd is not None:
    # pass the working directory using the terminal's CLI convention
    if "alacritty" in TERMINAL:
        ARGS = ["--working-directory", cwd, *ARGS]
    else:
        # assumes other terminals accept "-d <dir>" — verify for your $TERMINAL
        ARGS = ["-d", cwd, *ARGS]

# replace this process with the first matching terminal binary found on $PATH
for d in PATH.split(":"):
    path = os.path.join(d, TERMINAL)
    if not os.path.isfile(path):
        continue
    os.execl(path, path, *ARGS)

raise Exception(f"Error: terminal '{TERMINAL}' was not found in $PATH ({PATH})")
================================================
FILE: teams-attendance-parser.py
================================================
#! /usr/bin/env python3
"""
THE BEER-WARE LICENSE (Revision 42):
Jakub Klinkovský wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return.
"""
import argparse
import os.path
import datetime
import sys
# maybe depends on the locale in which MS Teams runs...
# known export formats: US English ("2/3/2021, 10:00:00 AM") and Czech ("3. 2. 2021 10:00:00")
TIMESTAMP_FORMATS = [
    "%m/%d/%Y, %I:%M:%S %p",
    "%d. %m. %Y %H:%M:%S",
]
# nominal duration of one class; used as the denominator for attendance percentage
CLASS_LENGTH = datetime.timedelta(minutes=100)
def parse_timestamp(timestamp):
    """Parse a Teams timestamp string, trying each known format in turn.

    Re-raises the last ValueError if no format matches.
    """
    last_error = None
    for fmt in TIMESTAMP_FORMATS:
        try:
            return datetime.datetime.strptime(timestamp, fmt)
        except ValueError as err:
            # remember the failure and try the next format
            last_error = err
    raise last_error
def parse_attendance_list(path):
    """Parse a MS Teams attendance list file (UTF-16, tab-separated).

    Returns a dict mapping participant name -> list of (action, datetime)
    tuples in file order. Raises AssertionError on unexpected content.
    """
    print(f"Parsing file {path}...")
    data = {}
    # use a context manager so the file handle is closed deterministically
    with open(path, "r", encoding="utf-16") as f:
        text = f.read()
    for line in text.splitlines():
        # parse items on the line
        name, action, timestamp = line.split("\t")
        # skip header line (English or Czech export)
        if name == "Full Name" or name == "Celé jméno":
            continue
        # validate items
        assert "," in name, name
        assert action in {"Joined", "Left", "Připojeno", "Odpojil(a) se"}, f"unknown action: {action}"
        timestamp = parse_timestamp(timestamp)
        # initialize data
        user_actions = data.setdefault(name, [])
        # append action
        user_actions.append((action, timestamp))
    return data
def get_attendance(class_start, actions):
    """Sum how long a participant was present between class_start and
    class_start + CLASS_LENGTH, given their (action, timestamp) events.

    Note: sorts `actions` in place by timestamp.
    """
    class_end = class_start + CLASS_LENGTH
    # make sure actions are sorted by timestamp
    actions.sort(key=lambda a: a[1])
    total = datetime.timedelta()
    joined = None
    for action, timestamp in actions:
        if action in {"Joined", "Připojeno"}:
            assert joined is None
            joined = timestamp
        elif action in {"Left", "Odpojil(a) se"}:
            assert joined is not None
            total += timestamp - joined
            joined = None
        else:
            assert False
    # handle the missing "Left" action
    if joined is not None:
        total += class_end - joined
    return total
def print_attendance(teacher, class_start, data):
    """Print the attendance table for one class, sorted by participant name."""
    print(f"Class teacher:\t{teacher}")
    print(f"Class start:\t{class_start}")
    print("Attendance:")
    for name in sorted(data.keys()):
        attendance = get_attendance(class_start, data[name])
        # use total_seconds(): timedelta.seconds silently discards the days part
        perc = attendance.total_seconds() / CLASS_LENGTH.total_seconds() * 100
        print(f" {name:<30}\t{attendance} ({perc:.0f}%)")
    print()
def main(path):
    """Process one attendance file.

    The first participant listed is assumed to be the teacher; the teacher's
    first event marks the class start.
    """
    data = parse_attendance_list(path)
    teacher = next(iter(data))
    class_start = data.pop(teacher)[0][1]
    print_attendance(teacher, class_start, data)
# command-line interface: process one or more attendance files
parser = argparse.ArgumentParser(description="parser for MS Teams attendance list files")
parser.add_argument("path", nargs="+", help="path to the attendance list file")
args = parser.parse_args()
for p in args.path:
    if os.path.isfile(p):
        main(p)
    else:
        # report invalid paths but keep processing the rest
        print(f"ERROR: {p} is not a file", file=sys.stderr)
================================================
FILE: toggle-touchpad.sh
================================================
#!/bin/bash
# Toggle touchpad status
# Using libinput and xinput
# NOTE: the shebang must be bash (was /bin/sh): the script uses [[ ]], which
# is a bashism and fails under POSIX sh implementations such as dash.

# Use xinput list and do a search for touchpads. Then get the first one and get its name.
device="$(xinput list | grep -P '(?<= )[\w\s:]*(?i)(touchpad|synaptics)(?-i).*?(?=\s*id)' -o | head -n1)"

# If the device is currently enabled, disable it; otherwise enable it.
# (An explicit if/else avoids the 'test && disable || enable' pitfall where a
# failing disable would also trigger enable.)
if [[ "$(xinput list-props "$device" | grep -P ".*Device Enabled.*\K.(?=$)" -o)" == "1" ]]; then
    xinput disable "$device"
else
    xinput enable "$device"
fi
================================================
FILE: touch-tree.py
================================================
#! /usr/bin/env python
# Little script to "touch" directory structure.
# Works like 'cp -r', but instead of copying full file, the new file is "touched",
# so the tree structure is preserved and only empty files created.
import sys
import os
class Main:
    """Mirror a directory tree: recreate every directory and "touch" every file
    (create it empty) under a new root."""

    def __init__(self, oldRoot, newRoot):
        # source tree to mirror
        self.oldRoot = oldRoot
        # destination root directory; must not exist yet
        self.newRoot = newRoot

    def browse(self, path):
        """Recursively mirror `path` (a directory inside oldRoot) into newRoot."""
        for file in os.listdir(path):
            absPath = os.path.join(path, file)
            relPath = os.path.relpath(absPath, self.oldRoot)
            if os.path.isdir(absPath):
                os.mkdir(os.path.join(self.newRoot, relPath))
                self.browse(absPath)
            elif os.path.isfile(absPath):
                # create an empty file at the same relative path
                open(os.path.join(self.newRoot, relPath), "w").close()

    def touchTree(self):
        """Create the destination root and mirror the whole tree."""
        # BUG FIX: the original referenced the module-level global `newRoot`
        # instead of the instance attribute, breaking any other usage
        os.mkdir(self.newRoot)
        self.browse(self.oldRoot)
# usage: touch-tree.py SRC_DIR DST_DIR
# SRC_DIR must be an existing directory; DST_DIR must not exist yet
if len(sys.argv) != 3 or not os.path.isdir(sys.argv[1]) or os.path.exists(sys.argv[2]):
    sys.exit(1)

oldRoot = os.path.abspath(sys.argv[1])
newRoot = os.path.abspath(sys.argv[2])
print(oldRoot + " => " + newRoot)

main = Main(oldRoot, newRoot)
main.touchTree()
================================================
FILE: waybar-khal.py
================================================
#! /usr/bin/env python3

# Waybar custom module: print today's remaining khal events as a JSON object
# with "text" (next event, shown in the bar) and "tooltip" (all events).

import subprocess
import json

data = {}

# list events from now until midnight, one line per event
cmd = [
    "khal",
    "list",
    "now",
    "23:59",
    "--once",
    "--format",
    "{start-time} ({location}) {title}{repeat-symbol}{alarm-symbol}",
]
output = subprocess.run(cmd, check=True, text=True, capture_output=True).stdout
lines = [line.strip() for line in output.split("\n")]

# filter out lines that do not start with a number
# (khal list includes headings like "Monday, 2025-03-31" for each day)
lines = [line for line in lines if line and line[0].isdigit()]

if lines:
    # next event goes into the bar text, the full list into the tooltip
    data["text"] = " " + lines[0]
    data["tooltip"] = "\n".join(lines)
else:
    # no remaining events today: render an empty module
    data["text"] = ""

print(json.dumps(data))
================================================
FILE: x
================================================
#! /bin/bash
# Some references:
# https://wiki.archlinux.org/index.php/Bash#Functions
# https://github.com/robbyrussell/oh-my-zsh/blob/master/plugins/extract/extract.plugin.zsh
# Extract one archive (given as $1) into the current working directory.
# Container formats (tar.*, zip, rar, 7z) are unpacked into a directory named
# after the archive; single-file compressors (gz, bz2, xz, zst, ...) are
# decompressed in place, keeping the original (-k). If the created directory
# ends up containing exactly one entry, that entry is flattened into the cwd.
function extract() {
    local success
    local status
    local fname
    local basename
    local extension

    success=0
    fname=$(realpath "$1")
    extension=${fname##*.}
    # remove extension from basename
    basename=$(basename "${fname%.*}")
    # hack to recognize .tar.gz etc as extension
    if [[ "${basename##*.}" == "tar" ]]; then
        extension="${basename##*.}.$extension"
        basename=$(basename "${basename%.*}")
    fi
    # split \.part[0-9]* from $basename
    basename="${basename%\.part[0-9]*}"

    case "$extension" in
        tar.gz|tgz|tar.bz2|tbz|tbz2|tar.xz|txz|tar.lzma|tlz|tar|tar.zst)
            mkdir "$basename"
            tar xvf "$fname" -C "$basename"
            ;;
        gz|Z)
            gzip -dkv "$fname"
            ;;
        bz2)
            bzip2 -dkv "$fname"
            ;;
        xz|lzma)
            xz -dkv "$fname"
            ;;
        zst)
            zstd -dkv "$fname"
            ;;
        zip)
            unzip "$fname" -d "$basename"
            ;;
        rar)
            mkdir "$basename"
            pushd "$basename"
            unrar x "$fname"
            popd
            ;;
        7z)
            7za x "$fname" -o"$basename"
            ;;
        *)
            echo "extract: '$fname' cannot be extracted" 1>&2
            success=1
            ;;
    esac
    # BUG FIX: capture the extractor's exit status immediately after the case.
    # The original '[[ $success == 0 ]] && success=$?' always assigned 0,
    # because by the time $? was read it held the result of the [[ ]] test
    # itself, so extraction failures were never detected.
    status=$?
    if [[ $success == 0 ]]; then
        success=$status
    fi

    # if destination directory contains only one file/dir, move it to cwd
    if [[ $success == 0 ]]; then
        count=$(find "$basename" -maxdepth 1 -mindepth 1 | wc -l)
        if [[ $count == 1 ]]; then
            name=$(basename "$(find "$basename" -maxdepth 1 -mindepth 1)")
            # can't move ./foo/foo into ./foo
            if [[ "$basename" == "$name" ]]; then
                tmp="$name.tmp"
            else
                tmp="$name"
            fi
            mv "$basename/$name" "$tmp"
            rmdir "$basename"
            mv "$tmp" "$name"
        fi
    fi
}
if [[ $# == 0 ]]; then
    echo "Usage: $0 file [file ...]"
    exit 1
fi

# process every argument; use arithmetic evaluation for the count:
# inside [[ ]] the '>' operator compares strings lexicographically,
# which only worked here by accident
while (( $# > 0 )); do
    if [[ -f "$1" ]]; then
        extract "$1"
    else
        echo "extract: '$1' is not a valid file"
    fi
    shift
done
gitextract_szwjc667/ ├── .gitmodules ├── Colours-EyeCandy/ │ ├── colourbars │ ├── colours │ ├── colourtheme │ ├── hypnotoad.pl │ ├── pacman.sh │ ├── spacey.sh │ └── tanks.sh ├── README.md ├── aur-check ├── aur-release ├── aur-remotebuild ├── backup-system.sh ├── batmanpager ├── bsnap.sh ├── btrfs-diff ├── btrfs-sync ├── btrfs-sync-WIP ├── clean-aur-dir.py ├── convertToUtf8.py ├── fatcp ├── ffparser.py ├── fmount.py ├── forcemp3convert.sh ├── hddtemp.sh ├── imap-notifier.py ├── img2pdf.sh ├── initscreen.sh ├── maildir-strip-attachments.py ├── makeissue.sh ├── mp3convert.py ├── nat-launch-subnet.sh ├── nat-launch.sh ├── notify-brightness.sh ├── notify-volume.sh ├── pacman-disowned.sh ├── pdf-extract.sh ├── perm.sh ├── pythonscripts/ │ ├── __init__.py │ ├── cpu.py │ ├── daemon.py │ ├── ffparser.py │ ├── logger.py │ ├── misc.py │ ├── tempfiles.py │ └── terminal.py ├── qemu-launcher.sh ├── qemu-mac-hasher.py ├── qemu-tap-helper.sh ├── remove-dead-symlinks.sh ├── replaygain.py ├── rexe ├── rmshit.py ├── run-pvserver ├── sway-sensible-terminal ├── teams-attendance-parser.py ├── toggle-touchpad.sh ├── touch-tree.py ├── waybar-khal.py └── x
SYMBOL INDEX (101 symbols across 17 files)
FILE: clean-aur-dir.py
function usage (line 10) | def usage():
FILE: convertToUtf8.py
function is_utf8 (line 9) | def is_utf8(filepath):
function to_utf8 (line 18) | def to_utf8(path):
function run (line 32) | def run():
FILE: fmount.py
function reformat_mntopts (line 15) | def reformat_mntopts(mntopts):
function mount_gio (line 23) | def mount_gio(*, scheme: str, host: str, path: str, user: str, port: str...
function mount_sshfs (line 70) | def mount_sshfs(*, host: str, path: str, user: str, port: str, mountpoin...
function mount (line 87) | def mount(name, mountpath: Path, config):
function umount (line 118) | def umount(mntpoint: Path):
function clean (line 134) | def clean(path: Path):
function cleanAll (line 144) | def cleanAll(mountpath):
function writeDefaultConfig (line 151) | def writeDefaultConfig():
FILE: imap-notifier.py
function load_config (line 85) | def load_config(config_path: Path):
function decode_header (line 106) | def decode_header(header):
class IMAPNotifier (line 123) | class IMAPNotifier:
method __init__ (line 132) | def __init__(self):
method load_state (line 139) | def load_state(self):
method save_state (line 147) | def save_state(self):
method get_account_id (line 156) | def get_account_id(self, account_config):
method get_password (line 167) | def get_password(self, account_config):
method send_notification (line 190) | def send_notification(self, message):
method is_connection_alive (line 217) | def is_connection_alive(self, connection, account_id):
method connect_to_account (line 227) | async def connect_to_account(self, account_config):
method get_new_emails (line 259) | async def get_new_emails(self, connection, account_id, mailboxes_to_pr...
method process_mailboxes (line 324) | async def process_mailboxes(self, account_config, account_id, connecti...
method process_account (line 371) | async def process_account(self, account_config):
method run (line 398) | async def run(self):
function main_async (line 420) | async def main_async():
function main (line 444) | def main():
FILE: maildir-strip-attachments.py
function process_maildir (line 32) | def process_maildir(maildir):
function argtype_dir_path (line 55) | def argtype_dir_path(string):
function argtype_maildir (line 60) | def argtype_maildir(string):
FILE: mp3convert.py
class GettingBitrateError (line 23) | class GettingBitrateError(Exception):
method __init__ (line 24) | def __init__(self, fname):
class ConversionError (line 28) | class ConversionError(Exception):
method __init__ (line 29) | def __init__(self, fname, status, output):
function get_bitrate (line 33) | def get_bitrate(filename):
function convert (line 43) | def convert(filename, output_extension, bitrate, delete_after=False):
class Main (line 57) | class Main():
method __init__ (line 58) | def __init__(self, args):
method print_stats (line 73) | def print_stats(self):
method check (line 84) | def check(self, path):
method run (line 107) | async def run(self):
method worker (line 122) | def worker(self, path):
method queue_generator (line 146) | def queue_generator(self):
FILE: pythonscripts/cpu.py
function cores_count (line 3) | def cores_count():
FILE: pythonscripts/daemon.py
function spawnDaemon (line 5) | def spawnDaemon(*args, detach_fds=True):
FILE: pythonscripts/ffparser.py
class FFprobeParser (line 12) | class FFprobeParser:
method __init__ (line 13) | def __init__(self, path):
method _get (line 25) | def _get(self, option, attribute):
method _getBitrate (line 28) | def _getBitrate(self, option):
method get (line 45) | def get(self, option, attribute):
method pprint (line 57) | def pprint(self, option):
FILE: pythonscripts/logger.py
class Logger (line 9) | class Logger:
method __init__ (line 10) | def __init__(self, log_level, prog_name):
method prefix (line 15) | def prefix(self, msg):
method debug (line 20) | def debug(self, msg):
method info (line 24) | def info(self, msg):
method warning (line 28) | def warning(self, msg):
method error (line 32) | def error(self, msg):
method critical (line 36) | def critical(self, msg, retval=1):
FILE: pythonscripts/misc.py
function format_sizeof (line 18) | def format_sizeof(num, unit_format="long"):
function format_time (line 34) | def format_time(seconds):
function cat (line 55) | def cat(fname):
function smart_truncate (line 74) | def smart_truncate(text, max_length=100, suffix='...'):
function mkdir (line 89) | def mkdir(path):
FILE: pythonscripts/tempfiles.py
class TempFiles (line 11) | class TempFiles:
method __init__ (line 12) | def __init__(self):
method removeAll (line 16) | def removeAll(self):
method remove (line 20) | def remove(self, file):
method getTempFileName (line 25) | def getTempFileName(self, prefix="tmp", suffix="", dir=None, text=False):
FILE: pythonscripts/terminal.py
function colorize (line 11) | def colorize(color, text):
function getColor (line 20) | def getColor(status, download_speed=0):
function getTerminalSize (line 47) | def getTerminalSize():
FILE: replaygain.py
class ReplayGain (line 15) | class ReplayGain:
method __init__ (line 19) | def __init__(self, logger, options, files):
method run (line 35) | def run(self):
method all_files_have_replaygain_tags (line 43) | def all_files_have_replaygain_tags(self):
method run_mp3gain (line 53) | def run_mp3gain(self):
method update_tags (line 77) | def update_tags(self):
class Main (line 128) | class Main:
method __init__ (line 132) | def __init__(self, logger, options):
method run (line 140) | async def run(self):
method worker (line 153) | def worker(self, paths):
method queue_generator (line 173) | def queue_generator(self):
function main (line 202) | def main(prog_name, options):
function argparse_path_handler (line 209) | def argparse_path_handler(path):
FILE: rmshit.py
function get_size (line 64) | def get_size(path):
function read_config (line 70) | def read_config():
function yesno (line 86) | def yesno(question, default="n"):
function format_size (line 103) | def format_size(size_in_bytes):
function rmshit (line 116) | def rmshit():
FILE: teams-attendance-parser.py
function parse_timestamp (line 23) | def parse_timestamp(timestamp):
function parse_attendance_list (line 33) | def parse_attendance_list(path):
function get_attendance (line 58) | def get_attendance(class_start, actions):
function print_attendance (line 84) | def print_attendance(teacher, class_start, data):
function main (line 96) | def main(path):
FILE: touch-tree.py
class Main (line 11) | class Main:
method __init__ (line 12) | def __init__(self, oldRoot, newRoot):
method browse (line 16) | def browse(self, path):
method touchTree (line 26) | def touchTree(self):
Condensed preview — 60 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (178K chars).
[
{
"path": ".gitmodules",
"chars": 73,
"preview": "[submodule \"submodules/cp-p\"]\n\tpath = submodules/cp-p\n\turl = ../cp-p.git\n"
},
{
"path": "Colours-EyeCandy/colourbars",
"chars": 1590,
"preview": "#!/bin/sh\n# by Him on the Arch boards\n# ANSI Color -- use these variables to easily have different color\n# and format"
},
{
"path": "Colours-EyeCandy/colours",
"chars": 1090,
"preview": "#!/bin/bash\n# Original: http://frexx.de/xterm-256-notes/\n# http://frexx.de/xterm-256-notes/data/colortable16.s"
},
{
"path": "Colours-EyeCandy/colourtheme",
"chars": 791,
"preview": "#!/bin/bash\n#\n# This file echoes a bunch of color codes to the \n# terminal to demonstrate what's available. Each \n#"
},
{
"path": "Colours-EyeCandy/hypnotoad.pl",
"chars": 3823,
"preview": "#!/usr/bin/perl\n\n# script by karabaja4\n# mail: karabaja4@archlinux.us\n\nmy $blackFG_yellowBG = \"\\e[30;43m\";\nmy $blackFG_r"
},
{
"path": "Colours-EyeCandy/pacman.sh",
"chars": 3729,
"preview": "#!/bin/sh\n\n# ANSI Color -- use these variables to easily have different color\n# and format output. Make sure to outpu"
},
{
"path": "Colours-EyeCandy/spacey.sh",
"chars": 1559,
"preview": "#!/bin/bash\n#ANSI color scheme script featuring Space Invaders\n#\n# Original: http://crunchbanglinux.org/forums/post/1269"
},
{
"path": "Colours-EyeCandy/tanks.sh",
"chars": 3050,
"preview": "#!/bin/sh\n\n# ANSI Color -- use these variables to easily have different color\n# and format output. Make sure to outpu"
},
{
"path": "README.md",
"chars": 72,
"preview": "A bunch of scripts I keep in `~/Scripts`, which is included in `$PATH`.\n"
},
{
"path": "aur-check",
"chars": 3611,
"preview": "#! /usr/bin/env python3\n\n\"\"\"\nCheck the repo for problems and new package versions\n\"\"\"\n\nimport subprocess\nfrom pathlib im"
},
{
"path": "aur-release",
"chars": 10169,
"preview": "#!/bin/bash\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n# aur-remotebuild - build packages remotely using aur-chroot\n"
},
{
"path": "aur-remotebuild",
"chars": 5368,
"preview": "#!/bin/bash\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n# aur-remotebuild - build packages remotely using aur-chroot\n"
},
{
"path": "backup-system.sh",
"chars": 1268,
"preview": "#!/bin/bash\n\n# exit on first error\nset -e\n\nbackupdir=\"/media/WD-black/backups\"\n\n# check if destination dir exists\nif [[ "
},
{
"path": "batmanpager",
"chars": 281,
"preview": "#!/bin/sh\n\n# mandoc passes a file name, other tools write to stdout\n# using `cat \"$@\"` we take care of both reading from"
},
{
"path": "bsnap.sh",
"chars": 2527,
"preview": "#! /usr/bin/bash\n\n# exit on first error\nset -e\n\nbackupdir=\"$HOME/_backup_snapshots\"\n\nusage() {\n echo $@ >&2\n echo "
},
{
"path": "btrfs-diff",
"chars": 1119,
"preview": "#!/bin/bash\n\n# Author: http://serverfault.com/users/96883/artfulrobot\n# License: Unknown\n#\n# This script will show most "
},
{
"path": "btrfs-sync",
"chars": 9521,
"preview": "#!/bin/bash\n\n#\n# Simple script that synchronizes BTRFS snapshots locally.\n# Features compression, retention policy and a"
},
{
"path": "btrfs-sync-WIP",
"chars": 14065,
"preview": "#!/bin/bash\n\nset -o errtrace\n\nversion=\"0.0\"\nname=\"btrfs-sync\"\n\nSNAPPER_CONFIG=/etc/conf.d/snapper\n\nTMPDIR=$(mktemp -d)\nP"
},
{
"path": "clean-aur-dir.py",
"chars": 1436,
"preview": "#! /usr/bin/env python\n\nimport os\nimport sys\nimport re\nimport subprocess\n\npkgname_regex = re.compile(\"^(?P<pkgname>[a-z0"
},
{
"path": "convertToUtf8.py",
"chars": 1367,
"preview": "#! /usr/bin/env python\n\nimport sys\nimport os\nimport traceback\n\nCHARSETS = (\"ascii\", \"cp1250\", \"cp1252\", \"iso-8859-9\", \"i"
},
{
"path": "fatcp",
"chars": 1630,
"preview": "#! /usr/bin/bash\n\n# Script for safe copying to FAT32 filesystems.\n# All bad characters are replaced by '_' (underscore) "
},
{
"path": "ffparser.py",
"chars": 1212,
"preview": "#! /usr/bin/env python\n\nimport argparse\n\nfrom pythonscripts.ffparser import FFprobeParser\n\n\nif __name__ == \"__main__\":\n "
},
{
"path": "fmount.py",
"chars": 7380,
"preview": "#! /usr/bin/env python3\n\nimport argparse\nimport configparser\nimport os\nimport subprocess\nimport sys\nfrom pathlib import "
},
{
"path": "forcemp3convert.sh",
"chars": 328,
"preview": "#! /bin/bash\n\n# forcefully convert any file to mp3 (with fixed bitrate), preserving metadata (if possible)\n\nset -e\n\nfor "
},
{
"path": "hddtemp.sh",
"chars": 397,
"preview": "#!/bin/bash\n\ndevices=\"$@\"\ndevices=${devices:-/dev/sda}\n\nfor device in $devices; do\n cmd=\"smartctl -d ata -a $device |"
},
{
"path": "imap-notifier.py",
"chars": 16012,
"preview": "#!/usr/bin/env python3\n\nimport asyncio\nimport email.header\nimport email.parser\nimport imaplib\nimport json\nimport logging"
},
{
"path": "img2pdf.sh",
"chars": 445,
"preview": "#!/bin/bash\n\nset -e\n\noutfile=$1\next=tif\n\necho \"Converting images to pdf...\"\ndeclare -a pages\n# take input pattern \"anyth"
},
{
"path": "initscreen.sh",
"chars": 2916,
"preview": "#! /bin/bash\n\n# exit on error\nset -e\n\n#hdmi=$(cat /sys/class/drm/card0-HDMI-A-1/status)\n#vga=$(cat /sys/class/drm/card0-"
},
{
"path": "maildir-strip-attachments.py",
"chars": 2268,
"preview": "#!/usr/bin/env python3\n\n# Documentation:\n# - https://docs.python.org/3/library/mailbox.html#mailbox.Maildir\n# - https://"
},
{
"path": "makeissue.sh",
"chars": 1046,
"preview": "echo -e '\\e[H\\e[2J' > issue\necho -e ' \\e[1;30m| \\e[34m\\\\s \\\\r"
},
{
"path": "mp3convert.py",
"chars": 7421,
"preview": "#! /usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport asyncio\nfrom concurrent.futures import ThreadPoolEx"
},
{
"path": "nat-launch-subnet.sh",
"chars": 5953,
"preview": "#!/bin/bash\n\n\nfunction print_launch_subnet_usage()\n{\n echo \"USAGE\"\n echo \" $0 <up|down>\"\n cat <<'CONFIG'\n\nREQUIRED V"
},
{
"path": "nat-launch.sh",
"chars": 694,
"preview": "#!/bin/bash\n\n# Original author: Xyne\n# http://xyne.archlinux.ca/notes/network/dhcp_with_dns.html\n\nfunction print_usage()"
},
{
"path": "notify-brightness.sh",
"chars": 585,
"preview": "#! /bin/bash\n\n# LCD brightness notification (level changed by ACPI, no action required)\n\n# duration in ms\nduration=1500\n"
},
{
"path": "notify-volume.sh",
"chars": 1219,
"preview": "#!/bin/bash\n\n# volume control (up/down/mute/unmute/toggle) + notification\n\n# duration in ms\nduration=1500\n\nnotify () {\n "
},
{
"path": "pacman-disowned.sh",
"chars": 264,
"preview": "#!/bin/sh\n\ntmp=${TMPDIR-/tmp}/pacman-disowned-$UID-$$\ndb=$tmp/db\nfs=$tmp/fs\n\nmkdir \"$tmp\"\ntrap 'rm -rf \"$tmp\"' EXIT\n\npac"
},
{
"path": "pdf-extract.sh",
"chars": 1240,
"preview": "#!/bin/bash\n\n# exit on error\nset -e\n\nany2img() {\n convert -density 150 \"$1\" -quality 100 \"$2\" &>/dev/null\n}\n\npdf2img("
},
{
"path": "perm.sh",
"chars": 637,
"preview": "#!/bin/bash\n\nopt=${1:-'-h'}\ndir=${2:-'.'}\n\nfmode=0644\ndmode=0755\n\ncase \"$1\" in\n -a) # dirs and files\n find \"$2"
},
{
"path": "pythonscripts/__init__.py",
"chars": 203,
"preview": "#!/usr/bin/env python\n\nimport os\nimport sys\n\n# hack - enable importing from _this_ directory\nsys.path.append(os.path.dir"
},
{
"path": "pythonscripts/cpu.py",
"chars": 309,
"preview": "#! /usr/bin/env python3\n\ndef cores_count():\n f = open(\"/proc/cpuinfo\")\n for line in f.readlines():\n if line"
},
{
"path": "pythonscripts/daemon.py",
"chars": 1797,
"preview": "#! /usr/bin/env python\n\nimport os\n\ndef spawnDaemon(*args, detach_fds=True):\n \"\"\"Spawn a completely detached subproces"
},
{
"path": "pythonscripts/ffparser.py",
"chars": 1982,
"preview": "#!/usr/bin/env python\n\nimport json\nimport subprocess\nimport shlex\nfrom pprint import pprint\n\n\nffprobe = \"ffprobe -v quie"
},
{
"path": "pythonscripts/logger.py",
"chars": 892,
"preview": "#! /usr/bin/env python\n\n\"\"\"\nSimple logger object. Log level is integer for easy comparison.\n\"\"\"\n\nimport sys\n\nclass Logge"
},
{
"path": "pythonscripts/misc.py",
"chars": 2184,
"preview": "#! /usr/bin/env python\n\n\"\"\"\nHuman-readable file size. Algorithm does not use a for-loop. It has constant\ncomplexity, O(1"
},
{
"path": "pythonscripts/tempfiles.py",
"chars": 757,
"preview": "#! /usr/bin/env python\n\n\"\"\"\nCreate temporary file, close file descriptor and return full path of the file.\n\"\"\"\n\nimport o"
},
{
"path": "pythonscripts/terminal.py",
"chars": 1651,
"preview": "#! /usr/bin/env python\n\n\"\"\"\nLinux terminal colors.\n\"\"\"\n\n#import sys\n\nCOLORS = {\"black\":30, \"red\":31, \"green\":32, \"yellow"
},
{
"path": "qemu-launcher.sh",
"chars": 4644,
"preview": "#! /usr/bin/bash\n\n# Author: Jakub Klinkovský (Lahwaacz)\n# https://github.com/lahwaacz\n\nfunction print_usage() {\n echo"
},
{
"path": "qemu-mac-hasher.py",
"chars": 334,
"preview": "#!/usr/bin/env python\n\n# Author: Jakub Klinkovský (Lahwaacz)\n# https://github.com/lahwaacz\n\nimport sys\nimport zlib\n\nif l"
},
{
"path": "qemu-tap-helper.sh",
"chars": 3419,
"preview": "#! /usr/bin/bash\n\n# Author: Jakub Klinkovský (Lahwaacz)\n# https://github.com/lahwaacz\n\n########## Functions ##########\n\n"
},
{
"path": "remove-dead-symlinks.sh",
"chars": 308,
"preview": "#! /bin/bash\n\n# recursively remove dead symlinks\n\nshopt -s globstar\n\n# non-recursive version: 'for itm in *'\nfor itm in "
},
{
"path": "replaygain.py",
"chars": 8819,
"preview": "#! /usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport subprocess\nimport asyncio\nfrom concurrent.futures i"
},
{
"path": "rexe",
"chars": 3098,
"preview": "#!/bin/bash\n\nset -e\n\nHOST=\"\"\nLOCAL_PATH=\"\"\nREMOTE_PATH=\"\"\nREXE_DIR=\"rexe\"\nCMD=\"\"\nDOWNLOAD=\"true\"\nEXCLUDE=()\nEXCLUDE_DOWN"
},
{
"path": "rmshit.py",
"chars": 3645,
"preview": "#! /usr/bin/env python3\n\nimport os\nimport shutil\nfrom pathlib import Path\n\nimport yaml\n\nDEFAULT_CONFIG = \"\"\"\n- ~/.adobe "
},
{
"path": "run-pvserver",
"chars": 402,
"preview": "#!/bin/bash\n\nhost=\"$1\"\n\nif [[ \"$host\" == \"\" ]]; then\n echo \"usage: $0 [user@]hostname\"\n exit 1\nfi\n\nhostname=$(ssh "
},
{
"path": "sway-sensible-terminal",
"chars": 1242,
"preview": "#! /usr/bin/env python3\n\nimport os\nimport sys\nimport json\nimport subprocess\n\nPATH = os.environ.get(\"PATH\", \"/usr/bin\")\nT"
},
{
"path": "teams-attendance-parser.py",
"chars": 3210,
"preview": "#! /usr/bin/env python3\n\n\"\"\"\nTHE BEER-WARE LICENSE (Revision 42):\nJakub Klinkovský wrote this file. As long as you retai"
},
{
"path": "toggle-touchpad.sh",
"chars": 473,
"preview": "#!/bin/sh\n# Toggle touchpad status\n# Using libinput and xinput\n\n# Use xinput list and do a search for touchpads. Then ge"
},
{
"path": "touch-tree.py",
"chars": 1142,
"preview": "#! /usr/bin/env python\n\n# Little script to \"touch\" directory structure.\n# Works like 'cp -r', but instead of copying ful"
},
{
"path": "waybar-khal.py",
"chars": 684,
"preview": "#! /usr/bin/env python3\n\nimport subprocess\nimport json\n\ndata = {}\n\ncmd = [\n \"khal\",\n \"list\",\n \"now\",\n \"23:59"
},
{
"path": "x",
"chars": 2405,
"preview": "#! /bin/bash\n\n# Some references:\n# https://wiki.archlinux.org/index.php/Bash#Functions\n# https://github.com/robbyrussell"
}
]
About this extraction
This page contains the full source code of the lahwaacz/Scripts GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 60 files (158.0 KB, approximately 46.9k tokens) and includes a symbol index with 101 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.