Repository: kdrag0n/freqbench Branch: master Commit: 9eb16adcec15 Files: 1549 Total size: 22.7 MB Directory structure: gitextract_7njipo89/ ├── .gitignore ├── LICENSE ├── README.md ├── anykernel/ │ ├── LICENSE │ ├── META-INF/ │ │ └── com/ │ │ └── google/ │ │ └── android/ │ │ ├── update-binary │ │ └── updater-script │ ├── anykernel.sh │ └── tools/ │ ├── ak3-core.sh │ ├── busybox │ └── magiskboot ├── bench.py ├── boot-v1/ │ ├── .gitignore │ ├── pack.sh │ ├── test.sh │ └── unpack.sh ├── config.sh ├── dhcpd.conf ├── init.sh ├── mkbootimg.py ├── pack-img.sh ├── pack-zip.sh ├── packrd-gz.sh ├── postprocess/ │ ├── cross_cpu_cluster_graph.py │ ├── cross_cpu_voltage_graph.py │ ├── efficient_freqs.py │ ├── filter_freqs.py │ ├── idle_csv.py │ ├── legacy_energy_model.py │ ├── requirements.txt │ ├── simplified_energy_model.py │ ├── unified_cluster_col.py │ └── unified_cluster_graph.py ├── rd/ │ ├── bin/ │ │ ├── bash │ │ ├── busybox │ │ ├── dmesg │ │ ├── more │ │ ├── mount │ │ ├── mountpoint │ │ └── umount │ ├── dev/ │ │ └── null │ ├── etc/ │ │ ├── alpine-release │ │ ├── apk/ │ │ │ ├── arch │ │ │ ├── keys/ │ │ │ │ ├── alpine-devel@lists.alpinelinux.org-524d27bb.rsa.pub │ │ │ │ └── alpine-devel@lists.alpinelinux.org-58199dcc.rsa.pub │ │ │ ├── repositories │ │ │ └── world │ │ ├── crontabs/ │ │ │ └── root │ │ ├── dhcp/ │ │ │ └── dhcpd.conf.example │ │ ├── dropbear/ │ │ │ └── dropbear_ecdsa_host_key │ │ ├── fstab │ │ ├── group │ │ ├── group- │ │ ├── hostname │ │ ├── hosts │ │ ├── inittab │ │ ├── inputrc │ │ ├── issue │ │ ├── logrotate.d/ │ │ │ └── acpid │ │ ├── modprobe.d/ │ │ │ ├── aliases.conf │ │ │ ├── blacklist.conf │ │ │ ├── i386.conf │ │ │ └── kms.conf │ │ ├── modules │ │ ├── motd │ │ ├── network/ │ │ │ └── if-up.d/ │ │ │ └── dad │ │ ├── os-release │ │ ├── passwd │ │ ├── passwd- │ │ ├── profile │ │ ├── profile.d/ │ │ │ ├── color_prompt │ │ │ └── locale │ │ ├── protocols │ │ ├── resolv.conf │ │ ├── securetty │ │ ├── services │ │ ├── shadow │ │ ├── shadow- │ │ ├── shells │ │ ├── 
sysctl.conf │ │ ├── terminfo/ │ │ │ ├── a/ │ │ │ │ ├── alacritty │ │ │ │ └── ansi │ │ │ ├── d/ │ │ │ │ └── dumb │ │ │ ├── g/ │ │ │ │ ├── gnome │ │ │ │ └── gnome-256color │ │ │ ├── k/ │ │ │ │ ├── kitty │ │ │ │ ├── konsole │ │ │ │ ├── konsole-256color │ │ │ │ └── konsole-linux │ │ │ ├── l/ │ │ │ │ └── linux │ │ │ ├── p/ │ │ │ │ ├── putty │ │ │ │ └── putty-256color │ │ │ ├── r/ │ │ │ │ ├── rxvt │ │ │ │ └── rxvt-256color │ │ │ ├── s/ │ │ │ │ ├── screen │ │ │ │ ├── screen-256color │ │ │ │ ├── st-0.6 │ │ │ │ ├── st-0.7 │ │ │ │ ├── st-0.8 │ │ │ │ ├── st-16color │ │ │ │ ├── st-256color │ │ │ │ ├── st-direct │ │ │ │ └── sun │ │ │ ├── t/ │ │ │ │ ├── terminator │ │ │ │ ├── terminology │ │ │ │ ├── terminology-0.6.1 │ │ │ │ ├── terminology-1.0.0 │ │ │ │ ├── tmux │ │ │ │ └── tmux-256color │ │ │ ├── v/ │ │ │ │ ├── vt100 │ │ │ │ ├── vt102 │ │ │ │ ├── vt200 │ │ │ │ ├── vt220 │ │ │ │ ├── vt52 │ │ │ │ ├── vte │ │ │ │ └── vte-256color │ │ │ └── x/ │ │ │ ├── xterm │ │ │ ├── xterm-256color │ │ │ ├── xterm-color │ │ │ ├── xterm-kitty │ │ │ └── xterm-xfree86 │ │ └── udhcpd.conf │ ├── lib/ │ │ ├── firmware/ │ │ │ └── .gitignore │ │ ├── ld-musl-aarch64.so.1 │ │ ├── libblkid.so.1.1.0 │ │ ├── libfdisk.so.1.1.0 │ │ ├── libmount.so.1.1.0 │ │ ├── libpopt.so.0.0.0 │ │ ├── libsmartcols.so.1.1.0 │ │ ├── libuuid.so.1.3.0 │ │ └── libz.so.1.2.11 │ ├── root/ │ │ └── .config/ │ │ └── htop/ │ │ └── htoprc │ ├── sbin/ │ │ ├── blkid │ │ ├── fdisk │ │ ├── findfs │ │ ├── fsck │ │ ├── fsfreeze │ │ ├── ldconfig │ │ ├── mkfs │ │ ├── mkmntdirs │ │ └── nologin │ └── usr/ │ ├── bin/ │ │ ├── 2to3-3.8 │ │ ├── chrt │ │ ├── colcrt │ │ ├── coremark │ │ ├── dropbearkey │ │ ├── getopt │ │ ├── hardlink │ │ ├── hexdump │ │ ├── htop │ │ ├── ldd │ │ ├── look │ │ ├── lscpu │ │ ├── mesg │ │ ├── pydoc3.8 │ │ ├── python3.8 │ │ ├── qrtr-ns │ │ ├── reboot_with_cmd │ │ ├── rename │ │ ├── renice │ │ ├── rev │ │ ├── setarch │ │ ├── setsid │ │ ├── ssl_client │ │ ├── taskset │ │ ├── ul │ │ └── uuidgen │ ├── lib/ │ │ ├── bash/ │ │ │ ├── 
basename │ │ │ ├── dirname │ │ │ ├── fdflags │ │ │ ├── finfo │ │ │ ├── head │ │ │ ├── id │ │ │ ├── ln │ │ │ ├── logname │ │ │ ├── mkdir │ │ │ ├── mypid │ │ │ ├── pathchk │ │ │ ├── print │ │ │ ├── printenv │ │ │ ├── push │ │ │ ├── realpath │ │ │ ├── rmdir │ │ │ ├── seq │ │ │ ├── setpgid │ │ │ ├── sleep │ │ │ ├── strftime │ │ │ ├── sync │ │ │ ├── tee │ │ │ ├── truefalse │ │ │ ├── tty │ │ │ ├── uname │ │ │ ├── unlink │ │ │ └── whoami │ │ ├── libbz2.so.1.0.8 │ │ ├── libcap-ng.so.0.0.0 │ │ ├── libffi.so.7.1.0 │ │ ├── libformw.so.6.2 │ │ ├── libgcc_s.so.1 │ │ ├── libgdbm_compat.so.4.0.0 │ │ ├── liblzma.so.5.2.5 │ │ ├── libmenuw.so.6.2 │ │ ├── libncursesw.so.6.2 │ │ ├── libpanelw.so.6.2 │ │ ├── libpython3.8.so.1.0 │ │ ├── libreadline.so.8.0 │ │ ├── libstdc++.so.6.0.28 │ │ ├── libtls-standalone.so.1.0.0 │ │ └── python3.8/ │ │ ├── LICENSE.txt │ │ ├── __future__.py │ │ ├── __phello__.foo.py │ │ ├── _bootlocale.py │ │ ├── _collections_abc.py │ │ ├── _compat_pickle.py │ │ ├── _compression.py │ │ ├── _dummy_thread.py │ │ ├── _markupbase.py │ │ ├── _osx_support.py │ │ ├── _py_abc.py │ │ ├── _pydecimal.py │ │ ├── _pyio.py │ │ ├── _sitebuiltins.py │ │ ├── _strptime.py │ │ ├── _sysconfigdata__linux_aarch64-linux-gnu.py │ │ ├── _threading_local.py │ │ ├── _weakrefset.py │ │ ├── abc.py │ │ ├── aifc.py │ │ ├── antigravity.py │ │ ├── argparse.py │ │ ├── ast.py │ │ ├── asynchat.py │ │ ├── asyncore.py │ │ ├── base64.py │ │ ├── bdb.py │ │ ├── binhex.py │ │ ├── bisect.py │ │ ├── bz2.py │ │ ├── cProfile.py │ │ ├── calendar.py │ │ ├── cgi.py │ │ ├── cgitb.py │ │ ├── chunk.py │ │ ├── cmd.py │ │ ├── code.py │ │ ├── codecs.py │ │ ├── codeop.py │ │ ├── collections/ │ │ │ ├── __init__.py │ │ │ └── abc.py │ │ ├── colorsys.py │ │ ├── compileall.py │ │ ├── configparser.py │ │ ├── contextlib.py │ │ ├── contextvars.py │ │ ├── copy.py │ │ ├── copyreg.py │ │ ├── crypt.py │ │ ├── csv.py │ │ ├── curses/ │ │ │ ├── __init__.py │ │ │ ├── ascii.py │ │ │ ├── has_key.py │ │ │ ├── panel.py │ │ │ └── textpad.py │ 
│ ├── dataclasses.py │ │ ├── datetime.py │ │ ├── dbm/ │ │ │ ├── __init__.py │ │ │ ├── dumb.py │ │ │ ├── gnu.py │ │ │ └── ndbm.py │ │ ├── decimal.py │ │ ├── dis.py │ │ ├── dummy_threading.py │ │ ├── encodings/ │ │ │ ├── __init__.py │ │ │ ├── aliases.py │ │ │ ├── ascii.py │ │ │ ├── base64_codec.py │ │ │ ├── big5.py │ │ │ ├── bz2_codec.py │ │ │ ├── charmap.py │ │ │ ├── gbk.py │ │ │ ├── hex_codec.py │ │ │ ├── hz.py │ │ │ ├── idna.py │ │ │ ├── iso8859_1.py │ │ │ ├── latin_1.py │ │ │ ├── mbcs.py │ │ │ ├── oem.py │ │ │ ├── quopri_codec.py │ │ │ ├── raw_unicode_escape.py │ │ │ ├── rot_13.py │ │ │ ├── undefined.py │ │ │ ├── unicode_escape.py │ │ │ ├── utf_16.py │ │ │ ├── utf_16_be.py │ │ │ ├── utf_16_le.py │ │ │ ├── utf_32.py │ │ │ ├── utf_32_be.py │ │ │ ├── utf_32_le.py │ │ │ ├── utf_7.py │ │ │ ├── utf_8.py │ │ │ ├── utf_8_sig.py │ │ │ ├── uu_codec.py │ │ │ └── zlib_codec.py │ │ ├── enum.py │ │ ├── filecmp.py │ │ ├── fileinput.py │ │ ├── fnmatch.py │ │ ├── formatter.py │ │ ├── fractions.py │ │ ├── ftplib.py │ │ ├── functools.py │ │ ├── genericpath.py │ │ ├── getopt.py │ │ ├── getpass.py │ │ ├── gettext.py │ │ ├── glob.py │ │ ├── gzip.py │ │ ├── hashlib.py │ │ ├── heapq.py │ │ ├── hmac.py │ │ ├── imghdr.py │ │ ├── imp.py │ │ ├── importlib/ │ │ │ ├── __init__.py │ │ │ ├── _bootstrap.py │ │ │ ├── _bootstrap_external.py │ │ │ ├── abc.py │ │ │ ├── machinery.py │ │ │ ├── metadata.py │ │ │ ├── resources.py │ │ │ └── util.py │ │ ├── inspect.py │ │ ├── io.py │ │ ├── ipaddress.py │ │ ├── json/ │ │ │ ├── __init__.py │ │ │ ├── decoder.py │ │ │ ├── encoder.py │ │ │ ├── scanner.py │ │ │ └── tool.py │ │ ├── keyword.py │ │ ├── linecache.py │ │ ├── locale.py │ │ ├── lzma.py │ │ ├── mailcap.py │ │ ├── mimetypes.py │ │ ├── modulefinder.py │ │ ├── netrc.py │ │ ├── ntpath.py │ │ ├── nturl2path.py │ │ ├── numbers.py │ │ ├── opcode.py │ │ ├── operator.py │ │ ├── optparse.py │ │ ├── os.py │ │ ├── pathlib.py │ │ ├── pipes.py │ │ ├── pkgutil.py │ │ ├── platform.py │ │ ├── plistlib.py │ │ ├── 
poplib.py │ │ ├── posixpath.py │ │ ├── pprint.py │ │ ├── profile.py │ │ ├── pstats.py │ │ ├── pty.py │ │ ├── py_compile.py │ │ ├── pyclbr.py │ │ ├── queue.py │ │ ├── quopri.py │ │ ├── random.py │ │ ├── re.py │ │ ├── reprlib.py │ │ ├── rlcompleter.py │ │ ├── runpy.py │ │ ├── sched.py │ │ ├── secrets.py │ │ ├── selectors.py │ │ ├── shelve.py │ │ ├── shlex.py │ │ ├── shutil.py │ │ ├── signal.py │ │ ├── site-packages/ │ │ │ └── README.txt │ │ ├── site.py │ │ ├── smtpd.py │ │ ├── sndhdr.py │ │ ├── socket.py │ │ ├── socketserver.py │ │ ├── sqlite3/ │ │ │ ├── __init__.py │ │ │ ├── dbapi2.py │ │ │ └── dump.py │ │ ├── sre_compile.py │ │ ├── sre_constants.py │ │ ├── sre_parse.py │ │ ├── ssl.py │ │ ├── stat.py │ │ ├── statistics.py │ │ ├── string.py │ │ ├── stringprep.py │ │ ├── struct.py │ │ ├── subprocess.py │ │ ├── sunau.py │ │ ├── symbol.py │ │ ├── symtable.py │ │ ├── sysconfig.py │ │ ├── tabnanny.py │ │ ├── telnetlib.py │ │ ├── tempfile.py │ │ ├── textwrap.py │ │ ├── this.py │ │ ├── threading.py │ │ ├── timeit.py │ │ ├── token.py │ │ ├── tokenize.py │ │ ├── trace.py │ │ ├── traceback.py │ │ ├── tracemalloc.py │ │ ├── tty.py │ │ ├── types.py │ │ ├── typing.py │ │ ├── uu.py │ │ ├── uuid.py │ │ ├── warnings.py │ │ ├── wave.py │ │ ├── weakref.py │ │ ├── webbrowser.py │ │ ├── xdrlib.py │ │ ├── zipapp.py │ │ └── zipimport.py │ ├── sbin/ │ │ ├── dhcpd │ │ └── dropbear │ └── share/ │ ├── applications/ │ │ └── htop.desktop │ └── udhcpc/ │ └── default.script ├── reboot_with_cmd.c ├── results/ │ ├── README.md │ ├── exynos5250/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── voltages.txt │ ├── exynos7880/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── 
post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── exynos7904/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── exynos9611/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 4/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── gs101/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── gs201/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table 
│ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── msm8917/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ └── 0/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── msm8940/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ └── total_trans │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── old/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ └── 4/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── msm8953/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ └── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── second/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ └── 0/ │ │ 
│ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── msm8996/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── msm8998/ │ │ ├── fts-irq-storm/ │ │ │ ├── cmdline.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ └── versions.txt │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── mt6768/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── mt6853t/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm439/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ 
│ │ └── versions.txt │ │ └── olive/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm630/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── second/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm632/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ └── 4/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm636/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ └── total_trans │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── second/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── 
results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm660/ │ │ ├── fourth/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── second/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── third/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm670/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm710/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── 
post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm712/ │ │ ├── RMX1921/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── RMX1971/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ └── total_trans │ │ │ │ └── 6/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ └── total_trans │ │ │ │ └── 6/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── second/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ └── total_trans │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ └── total_trans │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sdm845/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── 
pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm6115/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── second/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 4/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm6125/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm6150/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── dev.list │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm6150ac/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ 
│ │ ├── run.log │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ ├── trans_table │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── new/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm7125/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm7150/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 6/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── second/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 6/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── 
processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── third/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm7150ab/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── sunfish/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm7150ac/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm7225/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── 
trans_table │ │ │ └── 6/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── m23xq/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 6/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm7250ab/ │ │ ├── 250kiter-headless/ │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ └── run.log │ │ ├── dyniter-headless/ │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ └── run.log │ │ ├── efficient_voltages.txt │ │ ├── main/ │ │ │ ├── clusters_coremarks.csv │ │ │ ├── clusters_power.csv │ │ │ ├── idle.csv │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ └── run.log │ │ ├── new/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 6/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ └── versions.txt │ │ └── voltages.txt │ ├── sm8150/ │ │ ├── eff_freqs.txt │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 
4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── eff_results.json │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ └── versions.txt │ │ ├── new/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── voltages.txt │ ├── sm8150ac/ │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ └── versions.txt │ │ ├── new/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── voltages.txt │ ├── sm8250/ │ │ ├── k30s/ │ │ │ ├── cmdline.txt │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt 
│ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── voltages_k30su.txt │ │ └── voltages_mi10tp.txt │ ├── sm8250ac/ │ │ ├── cmdline.txt │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── sm8350/ │ │ ├── fifth/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── fourth/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── 
total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── main/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── second/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ ├── 4/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 7/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── third/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 7/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── 
post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── t8103/ │ │ ├── asahi-cpufreq/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── asahi-cpufreq-max2988/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── asahi-cpufreq-single-pcore/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ ├── corellium-cpufreq/ │ │ │ ├── cmdline.txt │ │ │ ├── cpufreq_stats/ │ │ │ │ ├── 0/ │ │ │ │ │ ├── time_in_state │ │ │ │ │ ├── total_trans │ │ │ │ │ └── trans_table │ │ │ │ └── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── 
total_trans │ │ │ │ └── trans_table │ │ │ ├── cpuinfo.txt │ │ │ ├── device.txt │ │ │ ├── kernel.log │ │ │ ├── post_bench_interrupts.txt │ │ │ ├── pre_bench_interrupts.txt │ │ │ ├── processes.txt │ │ │ ├── results.csv │ │ │ ├── results.json │ │ │ ├── run.log │ │ │ ├── uptime.txt │ │ │ └── versions.txt │ │ └── corellium-cpufreq-max2988/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 4/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ ├── zuma/ │ │ └── main/ │ │ ├── cmdline.txt │ │ ├── cpufreq_stats/ │ │ │ ├── 0/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ ├── 4/ │ │ │ │ ├── time_in_state │ │ │ │ ├── total_trans │ │ │ │ └── trans_table │ │ │ └── 8/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── cpuinfo.txt │ │ ├── device.txt │ │ ├── kernel.log │ │ ├── post_bench_interrupts.txt │ │ ├── pre_bench_interrupts.txt │ │ ├── processes.txt │ │ ├── results.csv │ │ ├── results.json │ │ ├── run.log │ │ ├── uptime.txt │ │ └── versions.txt │ └── zumapro/ │ └── main/ │ ├── cmdline.txt │ ├── cpufreq_stats/ │ │ ├── 0/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ ├── 4/ │ │ │ ├── time_in_state │ │ │ ├── total_trans │ │ │ └── trans_table │ │ └── 7/ │ │ ├── time_in_state │ │ ├── total_trans │ │ └── trans_table │ ├── cpuinfo.txt │ ├── device.txt │ ├── kernel.log │ ├── post_bench_interrupts.txt │ ├── pre_bench_interrupts.txt │ ├── processes.txt │ ├── results.csv │ ├── results.json │ ├── run.log │ ├── uptime.txt │ └── versions.txt ├── test-img.sh └── usb.sh ================================================ FILE CONTENTS 
================================================ ================================================ FILE: .gitignore ================================================ *.img *.dtb *.lz4 *.cpio *.zip *.pyc rd/bench.py rd/dhcpd.conf rd/init rd/usb.sh rd/config.sh ================================================ FILE: LICENSE ================================================ The MIT License (MIT) Copyright (c) 2020 Danny Lin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # freqbench ![Power usage in mW per frequency per cluster for Qualcomm Snapdragon 835, 855, and 765G](https://user-images.githubusercontent.com/7930239/101429518-fb3a3a80-38b7-11eb-8005-5edf2d12a4d6.png) freqbench is a comprehensive CPU benchmark that benchmarks each CPU frequency step on each frequency scaling domain (e.g. ARM DynamIQ/big.LITTLE cluster). 
It is based on a minimal Alpine Linux userspace with the [EEMBC CoreMark](https://www.eembc.org/coremark/) workload and a Python benchmark coordinator. Results include: - Performance (CoreMark scores) - Performance efficiency (CoreMarks per MHz) - Power usage (in milliwatts) - Energy usage (in millijoules and joules) - Energy efficiency (ULPMark-CM scores: iterations per second per millijoule of energy used) - Baseline power usage - Time elapsed - CPU frequency scaling stats during the benchmark (for validation) - Diagnostic data (logs, kernel version, kernel command line, interrupts, processes) - Raw power samples in machine-readable JSON format (for postprocessing) ## Why? A benchmark like this can be useful for many reasons: - Creating energy models for EAS (Energy-Aware Scheduling) - Correcting inaccurate EAS energy models - Analyzing performance and power trends - Comparing efficiency across SoC and CPU generations - Improving performance and battery life of mobile devices by utilizing the race-to-idle phenomenon with efficient frequencies ## Usage It is possible to use freqbench with a stock kernel, but a custom kernel is **highly recommended** for accuracy. Stock OEM kernels are almost always missing features that the benchmark coordinator relies on for maximum accuracy. Custom kernel results are eligible for high accuracy classification, while stock kernel results are limited to low accuracy. Use a stock kernel at your own risk. ### Custom kernel (recommended) Set the following kernel config options: ```bash CONFIG_NO_HZ_FULL=y CONFIG_CPU_FREQ_TIMES=n # may not exist CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_HZ_100=y ``` Example commit: [kirin_defconfig: Configure for freqbench](https://github.com/kdrag0n/proton_zf6/commit/59d24abf40dec) If you have any commits that prevent userspace from controlling CPU affinities and utilization, frequencies, or anything of the sort, revert them for the benchmark to work properly. 
Here are some common examples of such commits in downstream kernels and their corresponding reverts: - [Performance-critical IRQs and kthreads](https://github.com/kdrag0n/proton_kernel_wahoo/commit/29b315cd5f3a6) - [Existing efficient frequency tables](https://github.com/kdrag0n/proton_kernel_wahoo/commit/9b98ee3fabd14) - [Preventing userspace from setting minimum CPU frequencies](https://github.com/kdrag0n/proton_kernel_wahoo/commit/d9d2fe54e87f9) - [Ratelimiting fuel gauge queries](https://github.com/kdrag0n/proton_kernel_wahoo/commit/87ac3f89c7392) Example freqbench kernel adaptations: - [Pixel 2, msm-4.4](https://github.com/kdrag0n/proton_kernel_wahoo/commits/alpine-fbench) - [ZenFone 6, msm-4.14](https://github.com/kdrag0n/proton_zf6/commits/alpine-fbench-basic-example) - [Pixel 5, msm-4.19](https://github.com/kdrag0n/proton_kernel_redbull/commits/alpine-fbench-basic-example) (this device uses boot image v3, so it follows the manual boot image guide below) Compile and flash your new kernel. Note that Android will not work properly on this kernel, so make sure you take a backup of your old boot image to restore later. If necessary, adjust the config parameters in `config.sh`. Most modern devices will not need any changes. Run `pack-zip.sh` and flash `freqbench-installer.zip`. Unplug the device immediately, before the device starts booting. Do not try to wait for it to finish booting. Leaving the device plugged in will invalidate all power results. Finally, wait until the device reboots itself. Do not touch the device, any of its buttons, or plug/unplug it during the test. It will be frozen on the bootloader splash screen; do not assume that it is broken. The benchmark is expected to take a long time; 1 hour is reasonable for a slower CPU. Once the benchmark is done, retrieve the results from `/cache/freqbench` if your device has a cache partition, or `/persist/freqbench` otherwise (newer devices with A/B partitions don't have a cache partition). 
If you don't have 8 CPU cores, adjust `0-7` to `0-(number of CPUs - 1)` and `1-7` to `1-(number of CPUs - 1)` where appropriate.
If you got this far, please consider contributing your results to help freqbench evolve and gather data about different SoCs: ### Contributing results If you run the benchmark on a SoC that is not [already included](https://github.com/kdrag0n/freqbench/tree/master/results), **please contribute your results!** It's very helpful for me to see how well freqbench is working, and enables anyone to analyze results across different SoCs that they don't have. Contributing your results is easy: 1. [Fork this repository](https://github.com/kdrag0n/freqbench/) 2. Add your **entire results folder** (not just one file from it) to `results/socname/main`, replacing `socname` with the model name of your SoC in lowercase 3. [Open a pull request](https://github.com/kdrag0n/freqbench/compare) If you don't know your SoC's model name, search the name of your SoC (e.g. [Snapdragon 855](https://www.qualcomm.com/products/snapdragon-855-mobile-platform)) and find the part number from the SoC manufacturer. You can also get it from your kernel source code and/or `device.txt` or `cpuinfo.txt` in the freqbench results. If you are still unsure, feel free to open an issue or guess the name. Example names: - `sm8150` - `sm8150ac` - `sm7250ab` - `exynos8895` - `mt6889` Identifiable information such as the device serial number is automatically redacted by freqbench, so it should not be a problem. Don't worry about getting something wrong; I would much rather have results submitted with mistakes than nothing at all. ## Post-processing Several post-processing scripts, all written in Python and some using `matplotlib`, are available: ### Legacy energy model Create a legacy EAS energy model for use with older kernels. Optional argument after path to results: `key_type/value_type` Key types: - Frequency (default) - looks like `652800` or `2323200` - Capacity - looks like `139` or `1024` You must use the correct key type for your kernel. 
In general, kernels 4.19 and newer need `capacity-dmips-mhz`, while older kernels need `efficiency` when it comes to the contents of the CPU sections.
Example usage: `./simplified_energy_model.py results.json 1.300000=580000 1.576000=580000 1.614400=580000 1.864000=644000 1.1075200=708000 1.1363200=788000 1.1516800=860000 1.1651200=888000 1.1804800=968000 6.652800=624000 6.940800=672000 6.1152000=704000 6.1478400=752000 6.1728000=820000 6.1900800=864000 6.2092800=916000 6.2208000=948000 7.806400=564000 7.1094400=624000 7.1401600=696000 7.1766400=776000 7.1996800=836000 7.2188800=888000 7.2304000=916000 7.2400000=940000` ### Efficient frequencies (experimental) Derive a list of efficient frequencies for each cluster and create a new results.json with only those frequencies included. Note that this script is **experimental** and may not produce optimal results. Manual tuning of the resulting frequency tables is recommended. Example usage: `./efficient_freqs.py results.json eff_results.json` ### Filter frequencies Create a new results.json with only the specified frequencies included. Example usage: `./filter_freqs.py results.json filtered_results.json 1.1516800 1.1804800 6.1478400 6.1728000 6.2208000 7.1766400 7.2188800 7.2304000 7.2400000` ### Cross-CPU cluster graph ![Performance (iter/s) across 835, 855, and 765G](https://user-images.githubusercontent.com/7930239/101309012-19446400-3800-11eb-8418-bb9293b08871.png) Graph a value for each cluster across different SoCs/CPUs. Arguments: - Add a SoC: `SoC-1:soc1/results.json` - Specify the value to graph: `load/value` (load is idle/active) - Set a flag: `+flagname` (soccolor, minscl) Example usage: `./cross_cpu_cluster_graph.py 835:results/p2/main/results.json 855:results/zf6/main/results.json 855+:results/rog2/main/results.json 765G:results/p5/new-final/results.json active/power_mean +soccolor +minscl` ### Unified cluster graph ![Performance (iter/s) across 765G little, big, and prime clusters](https://user-images.githubusercontent.com/7930239/101309506-712f9a80-3801-11eb-9ae6-8dba84f063d4.png) Graph a value for each cluster within the same SoC/CPU. 
Example usage: `./unified_cluster_col.py results.json coremark_score cm_scores.csv`
You should see something like this in your kernel logs if you are running Linux: ```log [7064379.627645] usb 7-3: new high-speed USB device number 114 using xhci_hcd [7064379.772208] usb 7-3: New USB device found, idVendor=0b05, idProduct=4daf, bcdDevice= 4.14 [7064379.772210] usb 7-3: New USB device strings: Mfr=1, Product=2, SerialNumber=3 [7064379.772211] usb 7-3: Product: Alpine GNU/Linux [7064379.772211] usb 7-3: Manufacturer: Linux [7064379.772212] usb 7-3: SerialNumber: ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@10.15.19.82 [7064379.818904] rndis_host 7-3:1.0 usb0: register 'rndis_host' at usb-0000:47:00.1-3, RNDIS device, da:34:ab:99:c5:81 [7064379.870018] rndis_host 7-3:1.0 enp71s0f1u3: renamed from usb0 ``` Run the SSH command in the serial number field to open a shell to the device. The password is empty, so just press enter when asked to provide a password. ================================================ FILE: anykernel/LICENSE ================================================ ## AnyKernel3 (AK3), and AnyKernel2/AnyKernel 2.0 (AK2) Scripts License: AnyKernel (versions 2.0/2 and later) Android image modifying scripts. Copyright (c) 2019 Chris Renshaw (osm0sis @ xda-developers), and additional contributors per readily available commit history/credits. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
Version 2 is the only version of this license which this version of BusyBox (or modified versions derived from this one) may be distributed under. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Per Section 3(b), self-compiled binary from modified source: https://git.busybox.net/busybox/ https://github.com/osm0sis/android-busybox-ndk (pre-patched source tree used to build available upon request) ## Optional Binary Licenses: mkbootfs, mkbootimg: Apache License 2.0 mkmtkhdr: Apache License 2.0, implied (AOSP mkbootimg derived) boot_signer*.jar: Apache License 2.0 Copyright (c) 2008 The Android Open Source Project Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Source not required, however, respective sources are provided: https://github.com/osm0sis/mkbootfs https://github.com/osm0sis/mkbootimg https://github.com/osm0sis/mkmtkhdr https://android.googlesource.com/platform/system/extras/+/master flash_erase, nanddump, nandwrite (mtd-utils): GPLv2 dumpimage, mkimage (U-Boot): GPLv2+ mboot: GPLv2 (Intel mboot.py derived) Copyright their respective authors, (linked below). 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Per Section 3(b), self-compiled binaries from unmodified respective sources: http://git.infradead.org/mtd-utils.git https://gitlab.denx.de/u-boot/u-boot https://github.com/osm0sis/mboot futility: BSD 3-Clause License (Chromium OS) unpackelf, elftool: BSD 3-Clause License, implied (Sony mkelf.py derived) Copyright their respective authors, (linked below). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Source not required, however, respective sources are provided: https://github.com/osm0sis/futility https://github.com/osm0sis/unpackelf https://github.com/osm0sis/elftool (https://github.com/sonyxperiadev/device-sony-lt26/tree/master/tools) rkcrc: BSD 2-Clause License Copyright (c) 2010, 2011 Fukaumi Naoki Copyright (c) 2013 Ivo van Poorten All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Source not required, however, respective source is provided: https://github.com/linux-rockchip/rkflashtool ## Additional Build Scripts for Listed Binaries (where used): osm0sis' Odds and Ends Thread - Knowledge Base: https://forum.xda-developers.com/showthread.php?p=53554719 ================================================ FILE: anykernel/META-INF/com/google/android/update-binary ================================================ #!/sbin/sh # AnyKernel3 Backend (DO NOT CHANGE) # osm0sis @ xda-developers OUTFD=/proc/self/fd/$2; ZIPFILE="$3"; BOOTMODE=false; ps | grep zygote | grep -v grep >/dev/null && BOOTMODE=true; $BOOTMODE || ps -A 2>/dev/null | grep zygote | grep -v grep >/dev/null && BOOTMODE=true; DIR=/sdcard; $BOOTMODE || DIR=$(dirname "$ZIPFILE"); [ -d /postinstall/tmp ] && POSTINSTALL=/postinstall; [ "$AKHOME" ] || AKHOME=$POSTINSTALL/tmp/anykernel; [ "$ANDROID_ROOT" ] || ANDROID_ROOT=/system; ui_print() { until [ ! "$1" ]; do echo "ui_print $1 ui_print" >> $OUTFD; shift; done; } ui_printfile() { while IFS='' read -r line || $BB [[ -n "$line" ]]; do ui_print "$line"; done < $1; } show_progress() { echo "progress $1 $2" >> $OUTFD; } file_getprop() { $BB grep "^$2=" "$1" | $BB cut -d= -f2-; } find_slot() { local slot=$(getprop ro.boot.slot_suffix 2>/dev/null); [ "$slot" ] || slot=$($BB grep -o 'androidboot.slot_suffix=.*$' /proc/cmdline | $BB cut -d\ -f1 | $BB cut -d= -f2); if [ ! 
"$slot" ]; then slot=$(getprop ro.boot.slot 2>/dev/null); [ "$slot" ] || slot=$($BB grep -o 'androidboot.slot=.*$' /proc/cmdline | $BB cut -d\ -f1 | $BB cut -d= -f2); [ "$slot" ] && slot=_$slot; fi; [ "$slot" ] && echo "$slot"; } setup_mountpoint() { [ -L $1 ] && $BB mv -f $1 ${1}_link; if [ ! -d $1 ]; then rm -f $1; mkdir -p $1; fi; } is_mounted() { $BB mount | $BB grep -q " $1 "; } mount_apex() { [ -d /system_root/system/apex ] || return 1; local apex dest loop minorx num; setup_mountpoint /apex; minorx=1; [ -e /dev/block/loop1 ] && minorx=$($BB ls -l /dev/block/loop1 | $BB awk '{ print $6 }'); num=0; for apex in /system_root/system/apex/*; do dest=/apex/$($BB basename $apex .apex); [ "$dest" == /apex/com.android.runtime.release ] && dest=/apex/com.android.runtime; $BB mkdir -p $dest; case $apex in *.apex) $BB unzip -qo $apex apex_payload.img -d /apex; $BB mv -f /apex/apex_payload.img $dest.img; $BB mount -t ext4 -o ro,noatime $dest.img $dest 2>/dev/null; if [ $? != 0 ]; then while [ $num -lt 64 ]; do loop=/dev/block/loop$num; ($BB mknod $loop b 7 $((num * minorx)); $BB losetup $loop $dest.img) 2>/dev/null; num=$((num + 1)); $BB losetup $loop | $BB grep -q $dest.img && break; done; $BB mount -t ext4 -o ro,loop,noatime $loop $dest; if [ $? 
!= 0 ]; then $BB losetup -d $loop 2>/dev/null; fi; fi; ;; *) $BB mount -o bind $apex $dest;; esac; done; export ANDROID_RUNTIME_ROOT=/apex/com.android.runtime; export ANDROID_TZDATA_ROOT=/apex/com.android.tzdata; export BOOTCLASSPATH=/apex/com.android.runtime/javalib/core-oj.jar:/apex/com.android.runtime/javalib/core-libart.jar:/apex/com.android.runtime/javalib/okhttp.jar:/apex/com.android.runtime/javalib/bouncycastle.jar:/apex/com.android.runtime/javalib/apache-xml.jar:/system/framework/framework.jar:/system/framework/ext.jar:/system/framework/telephony-common.jar:/system/framework/voip-common.jar:/system/framework/ims-common.jar:/system/framework/android.test.base.jar:/system/framework/telephony-ext.jar:/apex/com.android.conscrypt/javalib/conscrypt.jar:/apex/com.android.media/javalib/updatable-media.jar; } umount_apex() { [ -d /apex ] || return 1; local dest loop; for dest in $($BB find /apex -type d -mindepth 1 -maxdepth 1); do if [ -f $dest.img ]; then loop=$($BB mount | $BB grep $dest | $BB cut -d\ -f1); fi; ($BB umount -l $dest; $BB losetup -d $loop) 2>/dev/null; done; $BB rm -rf /apex 2>/dev/null; unset ANDROID_RUNTIME_ROOT ANDROID_TZDATA_ROOT BOOTCLASSPATH; } mount_all() { if ! is_mounted /cache; then $BB mount /cache 2>/dev/null && UMOUNT_CACHE=1; fi; if ! is_mounted /data; then $BB mount /data && UMOUNT_DATA=1; fi; ($BB mount -o ro -t auto /vendor; $BB mount -o ro -t auto /product; $BB mount -o ro -t auto /persist) 2>/dev/null; setup_mountpoint $ANDROID_ROOT; if ! is_mounted $ANDROID_ROOT; then $BB mount -o ro -t auto $ANDROID_ROOT 2>/dev/null; fi; case $ANDROID_ROOT in /system_root) setup_mountpoint /system;; /system) if ! is_mounted /system && ! is_mounted /system_root; then setup_mountpoint /system_root; $BB mount -o ro -t auto /system_root; elif [ -f /system/system/build.prop ]; then setup_mountpoint /system_root; $BB mount --move /system /system_root; fi; if [ $? 
!= 0 ]; then ($BB umount /system; $BB umount -l /system) 2>/dev/null; if [ -d /dev/block/mapper ]; then [ -e /dev/block/mapper/system ] || local slot=$(find_slot); $BB mount -o ro -t auto /dev/block/mapper/vendor$slot /vendor; $BB mount -o ro -t auto /dev/block/mapper/product$slot /product 2>/dev/null; $BB mount -o ro -t auto /dev/block/mapper/system$slot /system_root; else [ -e /dev/block/bootdevice/by-name/system ] || local slot=$(find_slot); ($BB mount -o ro -t auto /dev/block/bootdevice/by-name/vendor$slot /vendor; $BB mount -o ro -t auto /dev/block/bootdevice/by-name/product$slot /product; $BB mount -o ro -t auto /dev/block/bootdevice/by-name/persist$slot /persist) 2>/dev/null; $BB mount -o ro -t auto /dev/block/bootdevice/by-name/system$slot /system_root; fi; fi; ;; esac; if is_mounted /system_root; then mount_apex; if [ -f /system_root/build.prop ]; then $BB mount -o bind /system_root /system; else $BB mount -o bind /system_root/system /system; fi; fi; } umount_all() { local mount; (if [ ! -d /postinstall/tmp ]; then $BB umount /system; $BB umount -l /system; if [ -e /system_root ]; then $BB umount /system_root; $BB umount -l /system_root; fi; fi; umount_apex; umount /vendor; # busybox umount /vendor breaks recovery on some devices umount -l /vendor; for mount in /mnt/system /mnt/vendor /product /mnt/product /persist; do $BB umount $mount; $BB umount -l $mount; done; if [ "$UMOUNT_DATA" ]; then $BB umount /data; $BB umount -l /data; fi; if [ "$UMOUNT_CACHE" ]; then $BB umount /cache; $BB umount -l /cache; fi) 2>/dev/null; } setup_env() { $BOOTMODE && return 1; $BB mount -o bind /dev/urandom /dev/random; if [ -L /etc ]; then setup_mountpoint /etc; $BB cp -af /etc_link/* /etc; $BB sed -i 's; / ; /system_root ;' /etc/fstab; fi; umount_all; mount_all; OLD_LD_PATH=$LD_LIBRARY_PATH; OLD_LD_PRE=$LD_PRELOAD; OLD_LD_CFG=$LD_CONFIG_FILE; unset LD_LIBRARY_PATH LD_PRELOAD LD_CONFIG_FILE; if [ ! 
"$(getprop 2>/dev/null)" ]; then getprop() { local propdir propfile propval; for propdir in / /system_root /system /vendor /odm /product; do for propfile in default.prop build.prop; do if [ "$propval" ]; then break 2; else propval="$(file_getprop $propdir/$propfile $1 2>/dev/null)"; fi; done; done; if [ "$propval" ]; then echo "$propval"; else echo ""; fi; } elif [ ! "$(getprop ro.build.type 2>/dev/null)" ]; then getprop() { ($(which getprop) | $BB grep "$1" | $BB cut -d[ -f3 | $BB cut -d] -f1) 2>/dev/null; } fi; } restore_env() { $BOOTMODE && return 1; local dir; unset -f getprop; [ "$OLD_LD_PATH" ] && export LD_LIBRARY_PATH=$OLD_LD_PATH; [ "$OLD_LD_PRE" ] && export LD_PRELOAD=$OLD_LD_PRE; [ "$OLD_LD_CFG" ] && export LD_CONFIG_FILE=$OLD_LD_CFG; umount_all; [ -L /etc_link ] && $BB rm -rf /etc/*; (for dir in /apex /system /system_root /etc; do if [ -L "${dir}_link" ]; then rmdir $dir; $BB mv -f ${dir}_link $dir; fi; done; $BB umount -l /dev/random) 2>/dev/null; } debugging() { case $(basename "$ZIPFILE" .zip) in *-debugging) ui_print " " "Creating debugging archive in $DIR..."; [ -f /tmp/recovery.log ] && local log=/tmp/recovery.log; $BB tar -czf "$DIR/anykernel3-$(date +%Y-%m-%d_%H%M%S)-debug.tgz" $AKHOME $log; ;; esac; } cleanup() { cd $(dirname $AKHOME); rm -rf $AKHOME; } abort() { ui_print "$@"; debugging; restore_env; if [ ! 
-f anykernel.sh -o "$(file_getprop anykernel.sh do.cleanuponabort 2>/dev/null)" == 1 ]; then cleanup; fi; exit 1; } do_devicecheck() { [ "$(file_getprop anykernel.sh do.devicecheck)" == 1 ] || return 1; local device devicename match product testname vendordevice vendorproduct; ui_print "Checking device..."; device=$(getprop ro.product.device 2>/dev/null); product=$(getprop ro.build.product 2>/dev/null); vendordevice=$(getprop ro.product.vendor.device 2>/dev/null); vendorproduct=$(getprop ro.vendor.product.device 2>/dev/null); for testname in $(file_getprop anykernel.sh 'device.name.*'); do for devicename in $device $product $vendordevice $vendorproduct; do if [ "$devicename" == "$testname" ]; then ui_print "$testname" " "; match=1; break 2; fi; done; done; if [ ! "$match" ]; then abort " " "Unsupported device. Aborting..."; fi; } int2ver() { if $BB [ "$1" -eq "$1" ] 2>/dev/null; then echo "$1.0.0"; elif [ ! "$(echo "$1" | $BB cut -d. -f3)" ]; then echo "$1.0"; else echo "$1"; fi; } do_versioncheck() { [ "$(file_getprop anykernel.sh supported.versions)" ] || return 1; local android_ver hi_ver lo_ver parsed_ver supported supported_ver; ui_print "Checking Android version..."; supported_ver=$(file_getprop anykernel.sh supported.versions | $BB tr -d '[:space:]'); android_ver=$(file_getprop /system/build.prop ro.build.version.release); parsed_ver=$(int2ver $android_ver); if echo $supported_ver | $BB grep -q '-'; then lo_ver=$(int2ver "$(echo $supported_ver | $BB cut -d- -f1)"); hi_ver=$(int2ver "$(echo $supported_ver | $BB cut -d- -f2)"); if echo -e "$hi_ver\n$lo_ver\n$parsed_ver" | $BB sort -g | $BB grep -n "$parsed_ver" | $BB grep -q '^2:'; then supported=1; fi; else for ver in $(echo $supported_ver | $BB sed 's;,; ;g'); do if [ "$(int2ver $ver)" == "$parsed_ver" ]; then supported=1; break; fi; done; fi; if [ "$supported" ]; then ui_print "$android_ver" " "; else abort " " "Unsupported Android version. 
Aborting..."; fi; } do_levelcheck() { [ "$(file_getprop anykernel.sh supported.patchlevels)" ] || return 1; local android_lvl hi_lvl lo_lvl parsed_lvl supported_lvl; ui_print "Checking Android security patch level..."; supported_lvl=$(file_getprop anykernel.sh supported.patchlevels | $BB grep -oE '[0-9]{4}-[0-9]{2}|-'); android_lvl=$(file_getprop /system/build.prop ro.build.version.security_patch); parsed_lvl=$(echo $android_lvl | $BB grep -oE '[0-9]{4}-[0-9]{2}'); if echo $supported_lvl | $BB grep -q '^\-'; then lo_lvl=0000-00; hi_lvl=$(echo $supported_lvl | $BB awk '{ print $2 }'); elif echo $supported_lvl | $BB grep -q ' - '; then lo_lvl=$(echo $supported_lvl | $BB awk '{ print $1 }'); hi_lvl=$(echo $supported_lvl | $BB awk '{ print $3 }'); elif echo $supported_lvl | $BB grep -q '\-$'; then lo_lvl=$(echo $supported_lvl | $BB awk '{ print $1 }'); hi_lvl=9999-99; fi; if echo -e "$hi_lvl\n$lo_lvl\n$parsed_lvl" | $BB sort -g | $BB grep -n "$parsed_lvl" | $BB grep -q '^2:'; then ui_print "$android_lvl" " "; else abort " " "Unsupported Android security patch level. 
Aborting..."; fi; } dump_moduleinfo() { cat < $1; name=AK3 Helper Module version=$($BB awk '{ print $3 }' $AKHOME/vertmp) $($BB grep -oE '#.[0-9]' $AKHOME/vertmp) versionCode=1 author=AnyKernel3 description=$KERNEL_STRING EOF } dump_moduleremover() { cat <<'EOF' > $1; #!/system/bin/sh MODDIR=${0%/*}; if [ "$(cat /proc/version)" != "$(cat $MODDIR/version)" ]; then rm -rf $MODDIR; fi; EOF } do_modules() { [ "$(file_getprop anykernel.sh do.modules)" == 1 ] || return 1; local block modcon moddir modtarget module slot umask; if [ "$(file_getprop anykernel.sh do.systemless)" == 1 ]; then cd $AKHOME/modules; ui_print " " "Creating kernel helper Magisk module..."; if [ -d /data/adb/magisk -a -f $AKHOME/split_img/.magisk ]; then umask=$(umask); umask 022; moddir=/data/adb/modules/ak3-helper; rm -rf $moddir; mkdir -p system $moddir; ($BB mv -f product system; $BB mv -f vendor system) 2>/dev/null; $BB cp -rLf * $moddir; dump_moduleinfo $moddir/module.prop; dump_moduleremover $moddir/post-fs-data.sh; cp -f $AKHOME/vertmp $moddir/version; umask $umask; else ui_print "Magisk installation not found. Skipped!"; fi; else cd $AKHOME/modules; ui_print " " "Pushing modules..."; if [ -d /dev/block/mapper ]; then for block in system vendor product; do for slot in "" _a _b; do $BB blockdev --setrw /dev/block/mapper/$block$slot 2>/dev/null; done; done; fi; if [ ! -d /postinstall/tmp ]; then $BB mount -o rw,remount -t auto /system; ($BB mount -o rw,remount -t auto /vendor; $BB mount -o rw,remount -t auto /product) 2>/dev/null; fi; for module in $(find . -name '*.ko'); do modtarget=$POSTINSTALL$(echo $module | $BB cut -c2-); if [ ! 
-e $modtarget ]; then case $module in */vendor/*) modcon=vendor;; */product/*) modcon=product;; *) modcon=system;; esac; fi; if is_mounted $modtarget; then $BB mount -o rw,remount -t auto $modtarget; fi; mkdir -p $(dirname $modtarget); $BB cp -rLf $module $modtarget; $BB chown 0:0 $modtarget; $BB chmod 644 $modtarget; if [ "$modcon" ]; then chcon "u:object_r:${modcon}_file:s0" $modtarget; fi; if is_mounted $modtarget; then $BB mount -o ro,remount -t auto $modtarget; fi; done; if [ ! -d /postinstall/tmp ]; then $BB mount -o ro,remount -t auto /system; ($BB mount -o ro,remount -t auto /vendor; $BB mount -o ro,remount -t auto /product) 2>/dev/null; fi; fi; cd $AKHOME; } show_progress 1.34 4; ui_print " "; cleanup; mkdir -p $AKHOME/bin; cd $AKHOME; unzip -o "$ZIPFILE"; if [ $? != 0 -o ! "$(ls tools)" ]; then abort "Unzip failed. Aborting..."; fi; for ARCH32 in x86 arm; do if [ -d $AKHOME/tools/$ARCH32 ]; then BB=$AKHOME/tools/$ARCH32/busybox; chmod 755 $BB; $BB >/dev/null 2>&1; if [ $? == 0 ]; then $BB mv -f $AKHOME/tools/$ARCH32/* $AKHOME/tools; break; fi; fi; done; BB=$AKHOME/tools/busybox; chmod 755 $BB; $BB chmod -R 755 tools bin; $BB --install -s bin; if [ $? != 0 -o -z "$(ls bin)" ]; then abort "Busybox setup failed. Aborting..."; fi; if [ -f banner ]; then ui_printfile banner; ui_print " " " "; fi; KERNEL_STRING="$(file_getprop anykernel.sh kernel.string)"; ui_print "$KERNEL_STRING"; if [ -f version ]; then ui_print " "; ui_printfile version; ui_print " "; fi; ui_print " " "AnyKernel3 by osm0sis @ xda-developers" " " " "; setup_env; do_devicecheck; do_versioncheck; do_levelcheck; ui_print "Installing..."; CORE=$($BB grep -oE 'ak.*core.sh' anykernel.sh); [ -f tools/$CORE ] || $BB ln -s $AKHOME/tools/ak*-core.sh $AKHOME/tools/$CORE; PATH="$AKHOME/bin:$PATH" home=$AKHOME $BB ash anykernel.sh $2; if [ $? 
!= 0 ]; then abort; fi; do_modules; debugging; restore_env; if [ "$(file_getprop anykernel.sh do.cleanup)" == 1 ]; then cleanup; fi; ui_print " " " " "Done!"; ================================================ FILE: anykernel/META-INF/com/google/android/updater-script ================================================ #FLASHAFTERUPDATEV2 # Dummy file; update-binary is a shell script. ================================================ FILE: anykernel/anykernel.sh ================================================ # AnyKernel3 Ramdisk Mod Script # osm0sis @ xda-developers ## AnyKernel setup # begin properties properties() { ' kernel.string=freqbench - CPU benchmark by kdrag0n do.devicecheck=0 do.modules=0 do.systemless=0 do.cleanup=1 do.cleanuponabort=0 supported.versions= supported.patchlevels= '; } # end properties # shell variables block=auto; is_slot_device=auto; ramdisk_compression=auto; ## AnyKernel methods (DO NOT CHANGE) # import patching functions/variables - see for reference . tools/ak3-core.sh; ## AnyKernel install split_boot; cores=$(grep '^processor' /proc/cpuinfo | wc -l) all_cpus=0-$((cores - 1)) bench_cpus=1-$((cores - 1)) patch_cmdline rcu_nocbs rcu_nocbs=$all_cpus patch_cmdline isolcpus isolcpus=$bench_cpus patch_cmdline nohz_full nohz_full=$bench_cpus patch_cmdline loglevel loglevel=0 patch_cmdline printk.devkmsg printk.devkmsg=on patch_cmdline skip_initramfs "" mv $home/rd-new.cpio.gz $home/ramdisk-new.cpio flash_boot; ## end install ================================================ FILE: anykernel/tools/ak3-core.sh ================================================ ### AnyKernel methods (DO NOT CHANGE) ## osm0sis @ xda-developers OUTFD=$1; # set up working directory variables [ "$home" ] || home=$PWD; bootimg=$home/boot.img; bin=$home/tools; patch=$home/patch; ramdisk=$home/ramdisk; split_img=$home/split_img; ### output/testing functions: # ui_print "" [...] ui_print() { until [ ! 
"$1" ]; do echo "ui_print $1 ui_print" >> /proc/self/fd/$OUTFD; shift; done; } # abort ["" [...]] abort() { ui_print " " "$@"; exit 1; } # contains contains() { [ "${1#*$2}" != "$1" ]; } # file_getprop file_getprop() { grep "^$2=" "$1" | cut -d= -f2-; } ### ### file/directory attributes functions: # set_perm [ ...] set_perm() { local uid gid mod; uid=$1; gid=$2; mod=$3; shift 3; chown $uid:$gid "$@" || chown $uid.$gid "$@"; chmod $mod "$@"; } # set_perm_recursive [ ...] set_perm_recursive() { local uid gid dmod fmod; uid=$1; gid=$2; dmod=$3; fmod=$4; shift 4; while [ "$1" ]; do chown -R $uid:$gid "$1" || chown -R $uid.$gid "$1"; find "$1" -type d -exec chmod $dmod {} +; find "$1" -type f -exec chmod $fmod {} +; shift; done; } ### ### dump_boot functions: # split_boot (dump and split image only) split_boot() { local dumpfail; if [ ! -e "$(echo $block | cut -d\ -f1)" ]; then abort "Invalid partition. Aborting..."; fi; if [ "$(echo $block | grep ' ')" ]; then block=$(echo $block | cut -d\ -f1); customdd=$(echo $block | cut -d\ -f2-); elif [ ! "$customdd" ]; then local customdd="bs=1048576"; fi; if [ -f "$bin/nanddump" ]; then $bin/nanddump -f $bootimg $block; else dd if=$block of=$bootimg $customdd; fi; [ $? != 0 ] && dumpfail=1; mkdir -p $split_img; cd $split_img; if [ -f "$bin/unpackelf" ] && $bin/unpackelf -i $bootimg -h -q 2>/dev/null; then if [ -f "$bin/elftool" ]; then mkdir elftool_out; $bin/elftool unpack -i $bootimg -o elftool_out; fi; $bin/unpackelf -i $bootimg; [ $? != 0 ] && dumpfail=1; mv -f boot.img-zImage kernel.gz; mv -f boot.img-ramdisk.cpio.gz ramdisk.cpio.gz; mv -f boot.img-cmdline cmdline.txt 2>/dev/null; if [ -f boot.img-dt -a ! 
-f "$bin/elftool" ]; then case $(od -ta -An -N4 boot.img-dt | sed -e 's/ del//' -e 's/ //g') in QCDT|ELF) mv -f boot.img-dt dt;; *) gzip -c kernel.gz > kernel.gz-dtb; cat boot.img-dt >> kernel.gz-dtb; rm -f boot.img-dt kernel.gz; ;; esac; fi; elif [ -f "$bin/mboot" ]; then $bin/mboot -u -f $bootimg; elif [ -f "$bin/dumpimage" ]; then dd bs=$(($(printf '%d\n' 0x$(hexdump -n 4 -s 12 -e '16/1 "%02x""\n"' $bootimg)) + 64)) count=1 conv=notrunc if=$bootimg of=boot-trimmed.img; $bin/dumpimage -l boot-trimmed.img > header; grep "Name:" header | cut -c15- > boot.img-name; grep "Type:" header | cut -c15- | cut -d\ -f1 > boot.img-arch; grep "Type:" header | cut -c15- | cut -d\ -f2 > boot.img-os; grep "Type:" header | cut -c15- | cut -d\ -f3 | cut -d- -f1 > boot.img-type; grep "Type:" header | cut -d\( -f2 | cut -d\) -f1 | cut -d\ -f1 | cut -d- -f1 > boot.img-comp; grep "Address:" header | cut -c15- > boot.img-addr; grep "Point:" header | cut -c15- > boot.img-ep; $bin/dumpimage -p 0 -o kernel.gz boot-trimmed.img; [ $? != 0 ] && dumpfail=1; case $(cat boot.img-type) in Multi) $bin/dumpimage -p 1 -o ramdisk.cpio.gz boot-trimmed.img;; RAMDisk) mv -f kernel.gz ramdisk.cpio.gz;; esac; elif [ -f "$bin/rkcrc" ]; then dd bs=4096 skip=8 iflag=skip_bytes conv=notrunc if=$bootimg of=ramdisk.cpio.gz; else $bin/magiskboot unpack -h $bootimg; case $? in 1) dumpfail=1;; 2) touch chromeos;; esac; fi; if [ $? != 0 -o "$dumpfail" ]; then abort "Dumping/splitting image failed. Aborting..."; fi; cd $home; } # unpack_ramdisk (extract ramdisk only) unpack_ramdisk() { local comp; cd $split_img; if [ -f ramdisk.cpio.gz ]; then if [ -f "$bin/mkmtkhdr" ]; then mv -f ramdisk.cpio.gz ramdisk.cpio.gz-mtk; dd bs=512 skip=1 conv=notrunc if=ramdisk.cpio.gz-mtk of=ramdisk.cpio.gz; fi; mv -f ramdisk.cpio.gz ramdisk.cpio; fi; if [ -f ramdisk.cpio ]; then comp=$($bin/magiskboot decompress ramdisk.cpio 2>&1 | grep -v 'raw' | sed -n 's;.*\[\(.*\)\];\1;p'); else abort "No ramdisk found to unpack. 
Aborting..."; fi; if [ "$comp" ]; then mv -f ramdisk.cpio ramdisk.cpio.$comp; $bin/magiskboot decompress ramdisk.cpio.$comp ramdisk.cpio; if [ $? != 0 ]; then echo "Attempting ramdisk unpack with busybox $comp..." >&2; $comp -dc ramdisk.cpio.$comp > ramdisk.cpio; fi; fi; [ -d $ramdisk ] && mv -f $ramdisk $home/rdtmp; mkdir -p $ramdisk; chmod 755 $ramdisk; cd $ramdisk; EXTRACT_UNSAFE_SYMLINKS=1 cpio -d -F $split_img/ramdisk.cpio -i; if [ $? != 0 -o ! "$(ls)" ]; then abort "Unpacking ramdisk failed. Aborting..."; fi; if [ -d "$home/rdtmp" ]; then cp -af $home/rdtmp/* .; fi; } ### dump_boot (dump and split image, then extract ramdisk) dump_boot() { split_boot; unpack_ramdisk; } ### ### write_boot functions: # repack_ramdisk (repack ramdisk only) repack_ramdisk() { local comp packfail mtktype; cd $home; case $ramdisk_compression in auto|"") comp=$(ls $split_img/ramdisk.cpio.* 2>/dev/null | grep -v 'mtk' | rev | cut -d. -f1 | rev);; none|cpio) comp="";; gz) comp=gzip;; lzo) comp=lzop;; bz2) comp=bzip2;; lz4-l) comp=lz4_legacy;; *) comp=$ramdisk_compression;; esac; if [ -f "$bin/mkbootfs" ]; then $bin/mkbootfs $ramdisk > ramdisk-new.cpio; else cd $ramdisk; find . | cpio -H newc -o > $home/ramdisk-new.cpio; fi; [ $? != 0 ] && packfail=1; cd $home; $bin/magiskboot cpio ramdisk-new.cpio test; magisk_patched=$?; [ $((magisk_patched & 3)) -eq 1 ] && $bin/magiskboot cpio ramdisk-new.cpio "extract .backup/.magisk $split_img/.magisk"; if [ "$comp" ]; then $bin/magiskboot compress=$comp ramdisk-new.cpio; if [ $? != 0 ]; then echo "Attempting ramdisk repack with busybox $comp..." >&2; $comp -9c ramdisk-new.cpio > ramdisk-new.cpio.$comp; [ $? != 0 ] && packfail=1; rm -f ramdisk-new.cpio; fi; fi; if [ "$packfail" ]; then abort "Repacking ramdisk failed. 
Aborting..."; fi; if [ -f "$bin/mkmtkhdr" -a -f "$split_img/boot.img-base" ]; then mtktype=$(od -ta -An -N8 -j8 $split_img/ramdisk.cpio.gz-mtk | sed -e 's/ nul//g' -e 's/ //g' | tr '[:upper:]' '[:lower:]'); case $mtktype in rootfs|recovery) $bin/mkmtkhdr --$mtktype ramdisk-new.cpio*;; esac; fi; } # flash_boot (build, sign and write image only) flash_boot() { local varlist i kernel ramdisk fdt cmdline comp part0 part1 nocompflag signfail pk8 cert avbtype; cd $split_img; if [ -f "$bin/mkimage" ]; then varlist="name arch os type comp addr ep"; elif [ -f "$bin/mkbootimg" -a -f "$bin/unpackelf" -a -f boot.img-base ]; then mv -f cmdline.txt boot.img-cmdline 2>/dev/null; varlist="cmdline base pagesize kernel_offset ramdisk_offset tags_offset"; fi; for i in $varlist; do if [ -f boot.img-$i ]; then eval local $i=\"$(cat boot.img-$i)\"; fi; done; cd $home; for i in zImage zImage-dtb Image Image-dtb Image.gz Image.gz-dtb Image.bz2 Image.bz2-dtb Image.lzo Image.lzo-dtb Image.lzma Image.lzma-dtb Image.xz Image.xz-dtb Image.lz4 Image.lz4-dtb Image.fit; do if [ -f $i ]; then kernel=$home/$i; break; fi; done; if [ "$kernel" ]; then if [ -f "$bin/mkmtkhdr" -a -f "$split_img/boot.img-base" ]; then $bin/mkmtkhdr --kernel $kernel; kernel=$kernel-mtk; fi; elif [ "$(ls $split_img/kernel* 2>/dev/null)" ]; then kernel=$(ls $split_img/kernel* | grep -v 'kernel_dtb' | tail -n1); fi; if [ "$(ls ramdisk-new.cpio* 2>/dev/null)" ]; then ramdisk=$home/$(ls ramdisk-new.cpio* | tail -n1); elif [ -f "$bin/mkmtkhdr" -a -f "$split_img/boot.img-base" ]; then ramdisk=$split_img/ramdisk.cpio.gz-mtk; else ramdisk=$(ls $split_img/ramdisk.cpio* 2>/dev/null | tail -n1); fi; for fdt in dt recovery_dtbo dtb; do for i in $home/$fdt $home/$fdt.img $split_img/$fdt; do if [ -f $i ]; then eval local $fdt=$i; break; fi; done; done; cd $split_img; if [ -f "$bin/mkimage" ]; then [ "$comp" == "uncompressed" ] && comp=none; part0=$kernel; case $type in Multi) part1=":$ramdisk";; RAMDisk) part0=$ramdisk;; esac; 
$bin/mkimage -A $arch -O $os -T $type -C $comp -a $addr -e $ep -n "$name" -d $part0$part1 $home/boot-new.img; elif [ -f "$bin/elftool" ]; then [ "$dt" ] && dt="$dt,rpm"; [ -f cmdline.txt ] && cmdline="cmdline.txt@cmdline"; $bin/elftool pack -o $home/boot-new.img header=elftool_out/header $kernel $ramdisk,ramdisk $dt $cmdline; elif [ -f "$bin/mboot" ]; then cp -f $kernel kernel; cp -f $ramdisk ramdisk.cpio.gz; $bin/mboot -d $split_img -f $home/boot-new.img; elif [ -f "$bin/rkcrc" ]; then $bin/rkcrc -k $ramdisk $home/boot-new.img; elif [ -f "$bin/mkbootimg" -a -f "$bin/unpackelf" -a -f boot.img-base ]; then [ "$dt" ] && dt="--dt $dt"; $bin/mkbootimg --kernel $kernel --ramdisk $ramdisk --cmdline "$cmdline" --base $home --pagesize $pagesize --kernel_offset $kernel_offset --ramdisk_offset $ramdisk_offset --tags_offset "$tags_offset" $dt --output $home/boot-new.img; else [ "$kernel" ] && cp -f $kernel kernel; [ "$ramdisk" ] && cp -f $ramdisk ramdisk.cpio; [ "$dt" -a -f extra ] && cp -f $dt extra; for i in dtb recovery_dtbo; do [ "$(eval echo \$$i)" -a -f $i ] && cp -f $(eval echo \$$i) $i; done; case $kernel in *Image*) if [ ! "$magisk_patched" ]; then $bin/magiskboot cpio ramdisk.cpio test; magisk_patched=$?; fi; if [ $((magisk_patched & 3)) -eq 1 ]; then ui_print " " "Magisk detected! Patching kernel so reflashing Magisk is not necessary..."; comp=$($bin/magiskboot decompress kernel 2>&1 | grep -v 'raw' | sed -n 's;.*\[\(.*\)\];\1;p'); ($bin/magiskboot split $kernel || $bin/magiskboot decompress $kernel kernel) 2>/dev/null; if [ $? != 0 -a "$comp" ]; then echo "Attempting kernel unpack with busybox $comp..." >&2; $comp -dc $kernel > kernel; fi; $bin/magiskboot hexpatch kernel 736B69705F696E697472616D667300 77616E745F696E697472616D667300; if [ "$(file_getprop $home/anykernel.sh do.systemless)" == 1 ]; then strings kernel | grep -E 'Linux version.*#' > $home/vertmp; fi; if [ "$comp" ]; then $bin/magiskboot compress=$comp kernel kernel.$comp; if [ $? 
!= 0 ]; then echo "Attempting kernel repack with busybox $comp..." >&2; $comp -9c kernel > kernel.$comp; fi; mv -f kernel.$comp kernel; fi; [ ! -f .magisk ] && $bin/magiskboot cpio ramdisk.cpio "extract .backup/.magisk .magisk"; export $(cat .magisk); [ $((magisk_patched & 8)) -ne 0 ] && export TWOSTAGEINIT=true; for fdt in dtb extra kernel_dtb recovery_dtbo; do [ -f $fdt ] && $bin/magiskboot dtb $fdt patch; done; else case $kernel in *-dtb) rm -f kernel_dtb;; esac; fi; unset magisk_patched KEEPFORCEENCRYPT KEEPVERITY SHA1 TWOSTAGEINIT; ;; esac; case $ramdisk_compression in none|cpio) nocompflag="-n";; esac; $bin/magiskboot repack $nocompflag $bootimg $home/boot-new.img; fi; if [ $? != 0 ]; then abort "Repacking image failed. Aborting..."; fi; cd $home; if [ -f "$bin/futility" -a -d "$bin/chromeos" ]; then if [ -f "$split_img/chromeos" ]; then echo "Signing with CHROMEOS..." >&2; $bin/futility vbutil_kernel --pack boot-new-signed.img --keyblock $bin/chromeos/kernel.keyblock --signprivate $bin/chromeos/kernel_data_key.vbprivk --version 1 --vmlinuz boot-new.img --bootloader $bin/chromeos/empty --config $bin/chromeos/empty --arch arm --flags 0x1; fi; [ $? != 0 ] && signfail=1; fi; if [ -f "$bin/boot_signer-dexed.jar" -a -d "$bin/avb" ]; then pk8=$(ls $bin/avb/*.pk8); cert=$(ls $bin/avb/*.x509.*); case $block in *recovery*|*SOS*) avbtype=recovery;; *) avbtype=boot;; esac; if [ "$(/system/bin/dalvikvm -Xnoimage-dex2oat -cp $bin/boot_signer-dexed.jar com.android.verity.BootSignature -verify boot.img 2>&1 | grep VALID)" ]; then echo "Signing with AVBv1..." >&2; /system/bin/dalvikvm -Xnoimage-dex2oat -cp $bin/boot_signer-dexed.jar com.android.verity.BootSignature /$avbtype boot-new.img $pk8 $cert boot-new-signed.img; fi; fi; if [ $? != 0 -o "$signfail" ]; then abort "Signing image failed. Aborting..."; fi; mv -f boot-new-signed.img boot-new.img 2>/dev/null; if [ ! -f boot-new.img ]; then abort "No repacked image found to flash. 
Aborting..."; elif [ "$(wc -c < boot-new.img)" -gt "$(wc -c < boot.img)" ]; then abort "New image larger than boot partition. Aborting..."; fi; blockdev --setrw $block 2>/dev/null; if [ -f "$bin/flash_erase" -a -f "$bin/nandwrite" ]; then $bin/flash_erase $block 0 0; $bin/nandwrite -p $block boot-new.img; elif [ "$customdd" ]; then dd if=/dev/zero of=$block $customdd 2>/dev/null; dd if=boot-new.img of=$block $customdd; else cat boot-new.img /dev/zero > $block 2>/dev/null || true; fi; if [ $? != 0 ]; then abort "Flashing image failed. Aborting..."; fi; } # flash_dtbo (flash dtbo only) flash_dtbo() { local i dtbo dtboblock; cd $home; for i in dtbo dtbo.img; do if [ -f $i ]; then dtbo=$i; break; fi; done; if [ "$dtbo" ]; then dtboblock=/dev/block/bootdevice/by-name/dtbo$slot; if [ ! -e "$dtboblock" ]; then abort "dtbo partition could not be found. Aborting..."; fi; blockdev --setrw $dtboblock 2>/dev/null; if [ -f "$bin/flash_erase" -a -f "$bin/nandwrite" ]; then $bin/flash_erase $dtboblock 0 0; $bin/nandwrite -p $dtboblock $dtbo; elif [ "$customdd" ]; then dd if=/dev/zero of=$dtboblock 2>/dev/null; dd if=$dtbo of=$dtboblock; else cat $dtbo /dev/zero > $dtboblock 2>/dev/null || true; fi; if [ $? != 0 ]; then abort "Flashing dtbo failed. Aborting..."; fi; fi; } ### write_boot (repack ramdisk then build, sign and write image and dtbo) write_boot() { repack_ramdisk; flash_boot; flash_dtbo; } ### ### file editing functions: # backup_file backup_file() { [ ! -f $1~ ] && cp -fp $1 $1~; } # restore_file restore_file() { [ -f $1~ ] && cp -fp $1~ $1; rm -f $1~; } # replace_string replace_string() { [ "$5" == "global" ] && local scope=g; if ! grep -q "$2" $1; then sed -i "s;${3};${4};${scope}" $1; fi; } # replace_section replace_section() { local begin endstr last end; begin=$(grep -n "$2" $1 | head -n1 | cut -d: -f1); if [ "$begin" ]; then if [ "$3" == " " -o ! 
"$3" ]; then endstr='^[[:space:]]*$'; last=$(wc -l $1 | cut -d\ -f1); else endstr="$3"; fi; for end in $(grep -n "$endstr" $1 | cut -d: -f1) $last; do if [ "$end" ] && [ "$begin" -lt "$end" ]; then sed -i "${begin},${end}d" $1; [ "$end" == "$last" ] && echo >> $1; sed -i "${begin}s;^;${4}\n;" $1; break; fi; done; fi; } # remove_section remove_section() { local begin endstr last end; begin=$(grep -n "$2" $1 | head -n1 | cut -d: -f1); if [ "$begin" ]; then if [ "$3" == " " -o ! "$3" ]; then endstr='^[[:space:]]*$'; last=$(wc -l $1 | cut -d\ -f1); else endstr="$3"; fi; for end in $(grep -n "$endstr" $1 | cut -d: -f1) $last; do if [ "$end" ] && [ "$begin" -lt "$end" ]; then sed -i "${begin},${end}d" $1; break; fi; done; fi; } # insert_line insert_line() { local offset line; if ! grep -q "$2" $1; then case $3 in before) offset=0;; after) offset=1;; esac; line=$((`grep -n "$4" $1 | head -n1 | cut -d: -f1` + offset)); if [ -f $1 -a "$line" ] && [ "$(wc -l $1 | cut -d\ -f1)" -lt "$line" ]; then echo "$5" >> $1; else sed -i "${line}s;^;${5}\n;" $1; fi; fi; } # replace_line replace_line() { local lines line; if grep -q "$2" $1; then lines=$(grep -n "$2" $1 | cut -d: -f1 | sort -nr); [ "$4" == "global" ] || lines=$(echo "$lines" | tail -n1); for line in $lines; do sed -i "${line}s;.*;${3};" $1; done; fi; } # remove_line remove_line() { local lines line; if grep -q "$2" $1; then lines=$(grep -n "$2" $1 | cut -d: -f1 | sort -nr); [ "$3" == "global" ] || lines=$(echo "$lines" | tail -n1); for line in $lines; do sed -i "${line}d" $1; done; fi; } # prepend_file prepend_file() { if ! grep -q "$2" $1; then echo "$(cat $patch/$3 $1)" > $1; fi; } # insert_file insert_file() { local offset line; if ! grep -q "$2" $1; then case $3 in before) offset=0;; after) offset=1;; esac; line=$((`grep -n "$4" $1 | head -n1 | cut -d: -f1` + offset)); sed -i "${line}s;^;\n;" $1; sed -i "$((line - 1))r $patch/$5" $1; fi; } # append_file append_file() { if ! 
grep -q "$2" $1; then echo -ne "\n" >> $1; cat $patch/$3 >> $1; echo -ne "\n" >> $1; fi; } # replace_file replace_file() { cp -pf $patch/$3 $1; chmod $2 $1; } # patch_fstab block|mount|fstype|options|flags patch_fstab() { local entry part newpart newentry; entry=$(grep "$2" $1 | grep "$3"); if [ ! "$(echo "$entry" | grep "$6")" -o "$6" == " " -o ! "$6" ]; then case $4 in block) part=$(echo "$entry" | awk '{ print $1 }');; mount) part=$(echo "$entry" | awk '{ print $2 }');; fstype) part=$(echo "$entry" | awk '{ print $3 }');; options) part=$(echo "$entry" | awk '{ print $4 }');; flags) part=$(echo "$entry" | awk '{ print $5 }');; esac; newpart=$(echo "$part" | sed -e "s;${5};${6};" -e "s; ;;g" -e 's;,\{2,\};,;g' -e 's;,*$;;g' -e 's;^,;;g'); newentry=$(echo "$entry" | sed "s;${part};${newpart};"); sed -i "s;${entry};${newentry};" $1; fi; } # patch_cmdline patch_cmdline() { local cmdfile cmdtmp match; if [ -f "$split_img/cmdline.txt" ]; then cmdfile=$split_img/cmdline.txt; else cmdfile=$home/cmdtmp; grep "^cmdline=" $split_img/header | cut -d= -f2- > $cmdfile; fi; if ! grep -q "$1" $cmdfile; then cmdtmp=$(cat $cmdfile); echo "$cmdtmp $2" > $cmdfile; sed -i -e 's; *; ;g' -e 's;[ \t]*$;;' $cmdfile; else match=$(grep -o "$1.*$" $cmdfile | cut -d\ -f1); sed -i -e "s;${match};${2};" -e 's; *; ;g' -e 's;[ \t]*$;;' $cmdfile; fi; if [ -f "$home/cmdtmp" ]; then sed -i "s|^cmdline=.*|cmdline=$(cat $cmdfile)|" $split_img/header; rm -f $cmdfile; fi; } # patch_prop patch_prop() { if ! 
grep -q "^$2=" $1; then echo -ne "\n$2=$3\n" >> $1; else local line=$(grep -n "^$2=" $1 | head -n1 | cut -d: -f1); sed -i "${line}s;.*;${2}=${3};" $1; fi; } # patch_ueventd patch_ueventd() { local file dev perm user group newentry line; file=$1; dev=$2; perm=$3; user=$4; shift 4; group="$@"; newentry=$(printf "%-23s %-4s %-8s %s\n" "$dev" "$perm" "$user" "$group"); line=$(grep -n "$dev" $file | head -n1 | cut -d: -f1); if [ "$line" ]; then sed -i "${line}s;.*;${newentry};" $file; else echo -ne "\n$newentry\n" >> $file; fi; } ### ### configuration/setup functions: # reset_ak [keep] reset_ak() { local current i; current=$(dirname $home/*-files/current); if [ -d "$current" ]; then rm -rf $current/ramdisk; for i in $bootimg boot-new.img; do [ -e $i ] && cp -af $i $current; done; fi; [ -d $split_img ] && rm -rf $ramdisk; rm -rf $bootimg $split_img $home/*-new* $home/*-files/current; if [ "$1" == "keep" ]; then [ -d $home/rdtmp ] && mv -f $home/rdtmp $ramdisk; else rm -rf $patch $home/rdtmp; fi; setup_ak; } # setup_ak setup_ak() { local blockfiles parttype name part mtdmount mtdpart mtdname target; # allow multi-partition ramdisk modifying configurations (using reset_ak) if [ "$block" ] && [ ! -d "$ramdisk" -a ! -d "$patch" ]; then blockfiles=$home/$(basename $block)-files; if [ "$(ls $blockfiles 2>/dev/null)" ]; then cp -af $blockfiles/* $home; else mkdir -p $blockfiles; fi; touch $blockfiles/current; fi; # slot detection enabled by is_slot_device=1 or auto (from anykernel.sh) case $is_slot_device in 1|auto) slot=$(getprop ro.boot.slot_suffix 2>/dev/null); [ "$slot" ] || slot=$(grep -o 'androidboot.slot_suffix=.*$' /proc/cmdline | cut -d\ -f1 | cut -d= -f2); if [ ! "$slot" ]; then slot=$(getprop ro.boot.slot 2>/dev/null); [ "$slot" ] || slot=$(grep -o 'androidboot.slot=.*$' /proc/cmdline | cut -d\ -f1 | cut -d= -f2); [ "$slot" ] && slot=_$slot; fi; if [ "$slot" ]; then if [ -d /postinstall/tmp -a ! 
"$slot_select" ]; then slot_select=inactive; fi; case $slot_select in inactive) case $slot in _a) slot=_b;; _b) slot=_a;; esac; ;; esac; fi; if [ ! "$slot" -a "$is_slot_device" == 1 ]; then abort "Unable to determine active boot slot. Aborting..."; fi; ;; esac; # target block partition detection enabled by block=boot recovery or auto (from anykernel.sh) case $block in auto|"") block=boot;; esac; case $block in boot|recovery) case $block in boot) parttype="ramdisk boot BOOT LNX android_boot bootimg KERN-A kernel KERNEL";; recovery) parttype="ramdisk_recovery recovery RECOVERY SOS android_recovery";; esac; for name in $parttype; do for part in $name$slot $name; do if [ "$(grep -w "$part" /proc/mtd 2> /dev/null)" ]; then mtdmount=$(grep -w "$part" /proc/mtd); mtdpart=$(echo $mtdmount | cut -d\" -f2); if [ "$mtdpart" == "$part" ]; then mtdname=$(echo $mtdmount | cut -d: -f1); else abort "Unable to determine mtd $block partition. Aborting..."; fi; if [ -e /dev/mtd/$mtdname ]; then target=/dev/mtd/$mtdname; fi; elif [ -e /dev/block/by-name/$part ]; then target=/dev/block/by-name/$part; elif [ -e /dev/block/bootdevice/by-name/$part ]; then target=/dev/block/bootdevice/by-name/$part; elif [ -e /dev/block/platform/*/by-name/$part ]; then target=/dev/block/platform/*/by-name/$part; elif [ -e /dev/block/platform/*/*/by-name/$part ]; then target=/dev/block/platform/*/*/by-name/$part; elif [ -e /dev/$part ]; then target=/dev/$part; fi; [ "$target" ] && break 2; done; done; if [ "$target" ]; then block=$(ls $target 2>/dev/null); else abort "Unable to determine $block partition. Aborting..."; fi; ;; *) if [ "$slot" ]; then [ -e "$block$slot" ] && block=$block$slot; fi; ;; esac; if [ ! 
"$no_block_display" ]; then ui_print "$block"; fi; } ### ### end methods setup_ak; ================================================ FILE: bench.py ================================================ #!/usr/bin/env python3 import os import sys import time import subprocess import gc import statistics import json import threading import re import csv # Need to avoid as much extra CPU usage as possible gc.disable() # sysfs power supply nodes for power sampling POWER_SUPPLY = None POWER_SUPPLY_NODES = [ # Qualcomm Battery Management System + fuel gauge: preferred when available for more info "/sys/class/power_supply/bms", # Most common "/sys/class/power_supply/battery", # Nexus 10 "/sys/class/power_supply/ds2784-fuelgauge", ] # Some fuel gauges need current unit scaling POWER_CURRENT_FACTOR = 1 POWER_CURRENT_NODES = [ # Exynos devices with Maxim PMICs report µA separately "batt_current_ua_now", # Standard µA node "current_now", ] # Full paths to final nodes POWER_CURRENT_NODE = None POWER_VOLTAGE_NODE = None # Default power sampling intervals POWER_SAMPLE_INTERVAL = 1000 # ms POWER_SAMPLE_FG_DEFAULT_INTERVALS = { # qgauge updates every 100 ms, but sampling also uses power, so do it conservatively "qpnp,qg": 250, # qpnp-fg-gen3/4 update every 1000 ms "qpnp,fg": 1000, # SM8350+ aDSP fuel gauge updates every 1000 ms "qcom,pmic_glink": 1000, } # Needs to match init and cmdline HOUSEKEEPING_CPU = 0 # cpu0 is for housekeeping, so we can't benchmark it # Benchmark cpu1 instead, which is also in the little cluster REPLACE_CPUS = { HOUSEKEEPING_CPU: 1, } # How long to idle at each freq and measure power before benchmarking FREQ_IDLE_TIME = 5 # sec # To reduce chances of an array realloc + copy during benchmark runs PREALLOC_SECONDS = 300 # seconds of power sampling # CoreMark PERFORMANCE_RUN params with 300,000 iterations COREMARK_ITERATIONS = 300000 COREMARK_PERFORMANCE_RUN = ["0x0", "0x0", "0x66", str(COREMARK_ITERATIONS), "7", "1", "2000"] # Blank lines are for rounded corner & 
camera cutout protection BANNER = """ __ _ _ / _|_ __ ___ __ _| |__ ___ _ __ ___| |__ | |_| '__/ _ \/ _` | '_ \ / _ \ '_ \ / __| '_ \ | _| | | __/ (_| | |_) | __/ | | | (__| | | | |_| |_| \___|\__, |_.__/ \___|_| |_|\___|_| |_| |_| CPU benchmark • by kdrag0n ------------------------------------------------ """ SYS_CPU = "/sys/devices/system/cpu" # "Constants" evaluated at runtime for psy_node in POWER_SUPPLY_NODES: if os.path.exists(psy_node): POWER_SUPPLY = psy_node break POWER_VOLTAGE_NODE = f"{POWER_SUPPLY}/voltage_now" for node in POWER_CURRENT_NODES: path = f"{POWER_SUPPLY}/{node}" if os.path.exists(path): POWER_CURRENT_NODE = path break psy_name = os.readlink(POWER_SUPPLY) for fg_string, interval in POWER_SAMPLE_FG_DEFAULT_INTERVALS.items(): if fg_string in psy_name: POWER_SAMPLE_INTERVAL = interval break if len(sys.argv) > 1: override_interval = int(sys.argv[1]) if override_interval > 0: POWER_SAMPLE_INTERVAL = override_interval # Calculate prealloc slots now that the interval is known PREALLOC_SLOTS = int(PREALLOC_SECONDS / (POWER_SAMPLE_INTERVAL / 1000)) _stop_power_mon = False _prealloc_samples = [-1] * PREALLOC_SLOTS _power_samples = _prealloc_samples def pr_debug(*args, **kwargs): if __debug__: kwargs["flush"] = True print(*args, **kwargs) def run_cmd(args): pr_debug(f"Running command: {args}") proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) pr_debug(f"Command exited with return code {proc.returncode}") if proc.returncode == 0: return proc.stdout else: raise ValueError(f"Subprocess {args} failed with exit code {proc.returncode}:\n{proc.stdout}") def sample_power(): ma = int(read_file(POWER_CURRENT_NODE)) * POWER_CURRENT_FACTOR / 1000 mv = int(read_file(POWER_VOLTAGE_NODE)) / 1000 mw = ma * mv / 1000 return ma, mv, abs(mw) def start_power_thread(sample_interval=POWER_SAMPLE_INTERVAL): def _power_thread(): global _power_samples sample_dest = _prealloc_samples count = 0 while True: # Sleep before first sample to 
def stop_power_thread(thread):
    """Signal the power monitor to stop, wait for it, and return its samples."""
    global _stop_power_mon

    pr_debug("Setting flag to stop power monitor")
    _stop_power_mon = True
    pr_debug("Waiting for power monitor to stop")
    thread.join()
    # Reset so the next monitor thread starts cleanly
    _stop_power_mon = False

    return _power_samples


def write_cpu(cpu, node, content):
    """Write a value to a per-CPU sysfs node (e.g. cpuN/online)."""
    pr_debug(f"Writing CPU value: cpu{cpu}/{node} => {content}")
    path = f"{SYS_CPU}/cpu{cpu}/{node}"
    with open(path, "w") as f:
        f.write(content)


def read_file(node):
    """Read a file and return its stripped contents."""
    with open(node, "r") as f:
        content = f.read().strip()
    pr_debug(f"Reading file: {node} = {content}")
    return content


def create_power_stats(time_ns, samples):
    """Build the standard power-stats dict from an elapsed time and mW samples.

    Energy is mean power (mW) times elapsed seconds => millijoules.
    """
    sec = time_ns / 1e9
    mean_mw = statistics.mean(samples)
    energy_mj = mean_mw * sec

    return {
        "elapsed_sec": sec,
        "elapsed_ns": time_ns,
        "power_samples": samples,
        "power_mean": mean_mw,
        "energy_millijoules": energy_mj,
        "energy_joules": energy_mj / 1000,
    }


def get_cpu_freqs(cpu):
    """Return the sorted, de-duplicated list of available frequencies (kHz)."""
    freq_words = read_file(f"{SYS_CPU}/cpu{cpu}/cpufreq/scaling_available_frequencies").split(" ")

    # Some devices have extra boost frequencies not in scaling_available_frequencies
    boost_node = f"{SYS_CPU}/cpu{cpu}/cpufreq/scaling_boost_frequencies"
    if os.path.exists(boost_node):
        freq_words += read_file(boost_node).split(" ")

    # Need to sort because different platforms have different orders
    return sorted({int(word) for word in freq_words if word})
def init_cpus():
    """Discover frequency domains, offline all non-housekeeping CPUs,
    and pin the housekeeping CPU to its minimum frequency.

    Returns (bench_cpus, cpu_count): one representative CPU per cpufreq
    policy (with housekeeping CPUs substituted per REPLACE_CPUS), and the
    total CPU count parsed from /proc/cpuinfo.
    """
    print("Frequency domains: ", end="", flush=True)
    bench_cpus = []
    for entry in sorted(os.listdir(f"{SYS_CPU}/cpufreq")):
        if not entry.startswith("policy"):
            pr_debug(f"Unrecognized file/dir in cpufreq: {entry}")
            continue
        rep_cpu = int(entry[len("policy"):])
        # Don't benchmark the housekeeping CPU itself; use its stand-in
        rep_cpu = REPLACE_CPUS.get(rep_cpu, rep_cpu)
        print(f"cpu{rep_cpu}", end=" ", flush=True)
        bench_cpus.append(rep_cpu)
    print()

    print("Offline CPUs: ", end="", flush=True)
    cpu_count = len(re.findall(r'processor\s+:\s+\d+', read_file("/proc/cpuinfo")))
    for cpu in range(cpu_count):
        if cpu == HOUSEKEEPING_CPU:
            continue
        print(f"cpu{cpu}", end=" ", flush=True)
        write_cpu(cpu, "online", "0")
    print(flush=True)

    pr_debug("Minimizing frequency of housekeeping CPU")
    min_freq = min(get_cpu_freqs(HOUSEKEEPING_CPU))
    pr_debug(f"Minimum frequency for {HOUSEKEEPING_CPU}: {min_freq} kHz")
    write_cpu(HOUSEKEEPING_CPU, "cpufreq/scaling_governor", "userspace")
    write_cpu(HOUSEKEEPING_CPU, "cpufreq/scaling_setspeed", str(min_freq))
    pr_debug()

    return bench_cpus, cpu_count


def check_charging(node, charging_value, charging_warned):
    """Warn once if the supply node reports a charging state.

    Returns the updated charging_warned flag; nodes that don't exist are
    silently skipped (best-effort check across several supply types).
    """
    if not os.path.exists(node):
        return charging_warned

    psy_status = read_file(node)
    pr_debug(f"Power supply status at {node}: {psy_status}")
    if psy_status == charging_value and not charging_warned:
        print()
        print("=============== WARNING ===============")
        print("Detected power supply in charging state!")
        print("Power measurements will be invalid and benchmark results may be affected.")
        print("Unplug the device and restart the benchmark for valid results.")
        print("=============== WARNING ===============")
        print()
        return True

    return charging_warned


def init_power():
    """Warn about charging, calibrate the current unit scale, and measure
    baseline power with only the housekeeping CPU online.

    Returns (base_power_mW, base_power_samples).
    """
    global POWER_CURRENT_FACTOR

    pr_debug(f"Using power supply: {POWER_SUPPLY}")

    charging_warned = False
    charging_warned = check_charging(f"{POWER_SUPPLY}/status", "Charging", charging_warned)
    charging_warned = check_charging(f"/sys/class/power_supply/battery/status", "Charging", charging_warned)
    charging_warned = check_charging(f"/sys/class/power_supply/usb/present", "1", charging_warned)
    charging_warned = check_charging(f"/sys/class/power_supply/dc/present", "1", charging_warned)

    # Some PMICs may give unstable readings at this point
    pr_debug("Waiting for power usage to settle for initial current measurement")
    time.sleep(5)

    # Maxim PMICs used on Exynos devices report current in mA, not µA
    ref_current = int(read_file(POWER_CURRENT_NODE))
    # Assumption: will never be below 1 mA
    if abs(ref_current) <= 1000:
        POWER_CURRENT_FACTOR = 1000
        pr_debug(f"Scaling current by {POWER_CURRENT_FACTOR}x (derived from initial sample: {ref_current})")

    print(f"Sampling power every {POWER_SAMPLE_INTERVAL} ms")
    pr_debug(f"Pre-allocated {PREALLOC_SLOTS} sample slots for {PREALLOC_SECONDS} seconds")
    pr_debug(f"Power sample interval adjusted for power supply: {psy_name}")

    print("Baseline power usage: ", end="", flush=True)
    pr_debug("Waiting for power usage to settle")
    time.sleep(15)

    pr_debug()
    pr_debug("Measuring base power usage with only housekeeping CPU")
    # The power used for sampling might affect results here, so sample less often
    thread = start_power_thread(sample_interval=POWER_SAMPLE_INTERVAL * 2)
    time.sleep(60)
    base_power_samples = stop_power_thread(thread)
    base_power = statistics.median(base_power_samples)
    print(f"{base_power:.0f} mW")
    print()

    return base_power, base_power_samples
check_charging(f"/sys/class/power_supply/battery/status", "Charging", charging_warned) charging_warned = check_charging(f"/sys/class/power_supply/usb/present", "1", charging_warned) charging_warned = check_charging(f"/sys/class/power_supply/dc/present", "1", charging_warned) # Some PMICs may give unstable readings at this point pr_debug("Waiting for power usage to settle for initial current measurement") time.sleep(5) # Maxim PMICs used on Exynos devices report current in mA, not µA ref_current = int(read_file(POWER_CURRENT_NODE)) # Assumption: will never be below 1 mA if abs(ref_current) <= 1000: POWER_CURRENT_FACTOR = 1000 pr_debug(f"Scaling current by {POWER_CURRENT_FACTOR}x (derived from initial sample: {ref_current})") print(f"Sampling power every {POWER_SAMPLE_INTERVAL} ms") pr_debug(f"Pre-allocated {PREALLOC_SLOTS} sample slots for {PREALLOC_SECONDS} seconds") pr_debug(f"Power sample interval adjusted for power supply: {psy_name}") print("Baseline power usage: ", end="", flush=True) pr_debug("Waiting for power usage to settle") time.sleep(15) pr_debug() pr_debug("Measuring base power usage with only housekeeping CPU") # The power used for sampling might affect results here, so sample less often thread = start_power_thread(sample_interval=POWER_SAMPLE_INTERVAL * 2) time.sleep(60) base_power_samples = stop_power_thread(thread) base_power = statistics.median(base_power_samples) print(f"{base_power:.0f} mW") print() return base_power, base_power_samples def main(): bench_start_time = time.time() print(BANNER) pr_debug("Running in debug mode") pr_debug("Initializing CPU states") bench_cpus, cpu_count = init_cpus() pr_debug("Initializing power measurements") base_power, base_power_samples = init_power() pr_debug("Starting benchmark") pr_debug() cpus_data = {} for cpu in bench_cpus: print() print(f"===== CPU {cpu} =====") cpu_data = { "freqs": {} } cpus_data[cpu] = cpu_data pr_debug("Onlining CPU") write_cpu(cpu, "online", "1") pr_debug("Setting governor") 
write_cpu(cpu, "cpufreq/scaling_governor", "userspace") pr_debug("Getting frequencies") freqs = get_cpu_freqs(cpu) print("Frequencies:", " ".join(str(int(freq / 1000)) for freq in freqs)) print() # Some kernels may change the defaults pr_debug("Setting frequency limits") write_cpu(cpu, "cpufreq/scaling_min_freq", str(min(freqs))) write_cpu(cpu, "cpufreq/scaling_max_freq", str(max(freqs))) # Sometimes, reading back the limits immediately may give an incorrect result pr_debug("Waiting for frequency limits to take effect") time.sleep(1) # Bail out if the kernel is clamping our values pr_debug("Validating frequency limits") real_min_freq = int(read_file(f"{SYS_CPU}/cpu{cpu}/cpufreq/scaling_min_freq")) if real_min_freq != min(freqs): raise ValueError(f"Minimum frequency setting {min(freqs)} rejected by kernel; got {real_min_freq}") real_max_freq = int(read_file(f"{SYS_CPU}/cpu{cpu}/cpufreq/scaling_max_freq")) if real_max_freq != max(freqs): raise ValueError(f"Maximum frequency setting {max(freqs)} rejected by kernel; got {real_max_freq}") for freq in freqs: mhz = freq / 1000 print(f"{int(mhz):4d}: ", end="", flush=True) write_cpu(cpu, "cpufreq/scaling_setspeed", str(freq)) pr_debug("Waiting for frequency to settle") time.sleep(0.1) pr_debug("Validating frequency") real_freq = int(read_file(f"{SYS_CPU}/cpu{cpu}/cpufreq/scaling_cur_freq")) if real_freq != freq: raise ValueError(f"Frequency setting is {freq} but kernel is using {real_freq}") pr_debug("Waiting for power usage to settle") time.sleep(3) pr_debug("Measuring idle power usage") thread = start_power_thread() time.sleep(FREQ_IDLE_TIME) idle_power_samples = stop_power_thread(thread) idle_power = statistics.mean(idle_power_samples) idle_mj = idle_power * FREQ_IDLE_TIME idle_joules = idle_mj / 1000 pr_debug(f"Idle: {idle_power:4.0f} mW {idle_joules:4.1f} J") pr_debug("Running CoreMark...") thread = start_power_thread() start_time = time.time_ns() cm_out = run_cmd(["taskset", "-c", f"{cpu}", "coremark", 
*COREMARK_PERFORMANCE_RUN]) end_time = time.time_ns() power_samples = stop_power_thread(thread) pr_debug(cm_out) elapsed_sec = (end_time - start_time) / 1e9 # Extract score and iterations match = re.search(r'CoreMark 1\.0 : ([0-9.]+?) / ', cm_out) if not match.group(1): if "Must execute for at least 10 secs" in cm_out: raise ValueError("Benchmark ran too fast; increase COREMARK_ITERATIONS and try again") else: print(cm_out, file=sys.stderr) raise ValueError("Failed to parse CoreMark output") score = float(match.group(1)) match = re.search(r'Iterations\s+:\s+(\d+)', cm_out) iters = float(match.group(1)) # Adjust for base power usage power_samples = [sample - base_power for sample in power_samples] # Calculate power values power = statistics.mean(power_samples) # CoreMarks/MHz as per EEMBC specs cm_mhz = score / mhz # mW * sec = mJ mj = power * elapsed_sec joules = mj / 1000 # ULPMark-CM score = iterations per millijoule ulpmark_score = iters / mj print(f"{score:5.0f} {cm_mhz:3.1f} C/MHz {power:4.0f} mW {joules:4.1f} J {ulpmark_score:4.1f} I/mJ {elapsed_sec:5.1f} s") cpu_data["freqs"][freq] = { "active": { **create_power_stats(end_time - start_time, power_samples), "coremark_score": score, "coremarks_per_mhz": cm_mhz, "ulpmark_cm_score": ulpmark_score }, "idle": create_power_stats(int(FREQ_IDLE_TIME * 1e9), idle_power_samples), } # In case the CPU shares a freq domain with the housekeeping CPU, e.g. 
cpu1 pr_debug(f"Minimizing frequency of CPU: {min(freqs)} kHz") write_cpu(cpu, "cpufreq/scaling_setspeed", str(min(freqs))) pr_debug("Offlining CPU") write_cpu(cpu, "online", "0") print() # Make the rest run faster pr_debug("Maxing housekeeping CPU frequency") max_hk_freq = max(get_cpu_freqs(HOUSEKEEPING_CPU)) write_cpu(HOUSEKEEPING_CPU, "cpufreq/scaling_setspeed", str(max_hk_freq)) # OK to GC beyond this point as all the benchmarking is done pr_debug("Enabling Python GC") gc.enable() print() print("Benchmark finished!") bench_finish_time = time.time() pr_debug("Writing JSON data") data = { "version": 1, "total_elapsed_sec": bench_finish_time - bench_start_time, "housekeeping": create_power_stats(int(5 * 1e9), base_power_samples), "cpus": cpus_data, "meta": { "housekeeping_cpu": HOUSEKEEPING_CPU, "power_sample_interval": POWER_SAMPLE_INTERVAL, "cpu_count": cpu_count, }, } pr_debug("Writing JSON results") results_json = json.dumps(data) pr_debug(results_json) with open("/tmp/results.json", "w+") as f: f.write(results_json) pr_debug("Writing CSV results") with open("/tmp/results.csv", "w+") as f: fields = [ "CPU", "Frequency (kHz)", "CoreMarks (iter/s)", "CoreMarks/MHz", "Power (mW)", "Energy (J)", "ULPMark-CM (iter/mJ)", "Time (s)" ] writer = csv.DictWriter(f, fieldnames=fields) writer.writeheader() for cpu, cpu_data in cpus_data.items(): for freq, freq_data in cpu_data["freqs"].items(): freq_data = freq_data["active"] writer.writerow({ "CPU": cpu, "Frequency (kHz)": freq, "CoreMarks (iter/s)": freq_data["coremark_score"], "CoreMarks/MHz": freq_data["coremarks_per_mhz"], "Power (mW)": freq_data["power_mean"], "Energy (J)": freq_data["energy_joules"], "ULPMark-CM (iter/mJ)": freq_data["ulpmark_cm_score"], "Time (s)": freq_data["elapsed_sec"], }) if __name__ == "__main__": main() ================================================ FILE: boot-v1/.gitignore ================================================ *.img base board cmdline dtb dtb_offset hashtype header_version 
kernel_offset os_patch_level os_version pagesize ramdisk.gz ramdisk_offset second_offset tags_offset zImage ================================================ FILE: boot-v1/pack.sh ================================================ #!/usr/bin/env sh cd "$(dirname "$0")" ../packrd-gz.sh cp ../rd-new.cpio.gz ramdisk.gz mkbootimg \ --kernel zImage \ --ramdisk ramdisk.gz \ --cmdline "$(cat cmdline)" \ --board "$(cat board)" \ --base "$(cat base)" \ --pagesize "$(cat pagesize)" \ --kernel_offset "$(cat kernel_offset)" \ --ramdisk_offset "$(cat ramdisk_offset)" \ --second_offset "$(cat second_offset)" \ --tags_offset "$(cat tags_offset)" \ --os_version "$(cat os_version)" \ --os_patch_level "$(cat os_patch_level)" \ -o new.img ================================================ FILE: boot-v1/test.sh ================================================ #!/usr/bin/env sh set -eufo pipefail cd "$(dirname "$0")" cp ~/code/android/devices/zf6/proton/out/arch/arm64/boot/Image.gz-dtb zImage ./pack.sh adb reboot bootloader || true fastboot boot new.img ================================================ FILE: boot-v1/unpack.sh ================================================ #!/usr/bin/env sh cd "$(dirname "$0")" unpackbootimg -i "$1" rename "$1-" "" "$1-"* ================================================ FILE: config.sh ================================================ # Common config options for freqbench # Whether to enable verbose debug logging during the benchmark # DO NOT ENABLE for final benchmarking! # The extra framebuffer memory copies caused by it will influence results. 
DEBUG=false # How often to sample power usage while benchmarking (in milliseconds) # 0 = auto (default is based on fuel gauge) POWER_SAMPLE_INTERVAL=0 # Whether to expose an SSH server for debugging over virtual USB Ethernet # Do not enable for final benchmarking USB_DEBUG=false ================================================ FILE: dhcpd.conf ================================================ option domain-name-servers 8.8.8.8, 8.8.4.4; option subnet-mask 255.255.255.0; subnet 10.15.19.0 netmask 255.255.255.0 { range 10.15.19.100 10.15.19.254; } ================================================ FILE: init.sh ================================================ #!/usr/bin/env bash set -euo pipefail # Populate PATH and other basic env source /etc/profile # For htop config export HOME=/root source /config.sh # Must be in /persist or /tmp # /persist will be mounted from the cache partition if it exists OUT_DIR=/persist/freqbench reboot_end() { echo echo "Rebooting in 5 seconds..." # Rounded corner protection echo echo sleep 5 # Wait for volume down keypress #read -n1 # Wait for manual forced reboot #sleep inf reboot_with_cmd bootloader } saving_logs=false on_exit() { if ! $saving_logs; then save_logs fi echo echo echo "ERROR!" reboot_end } # Set trap before mounting in case devtmpfs fails trap on_exit EXIT # Mount essential pseudo-filesystems mount -t tmpfs tmpfs /dev mount -t proc proc /proc mount -t sysfs sysfs /sys mount -t tmpfs tmpfs /tmp # Populate /dev without devtmpfs mdev -s # Log to kernel log if no console is present if [[ ! 
-t 1 ]]; then exec > /dev/kmsg 2>&1 fi # Don't log anywhere #exec > /dev/null 2>&1 find_part_by_name() { pinfo="$(blkid -l --match-token "PARTLABEL=$1"; blkid -l --match-token "PARTLABEL=${1^^}")" # Check for existence first if [[ -z "$pinfo" ]]; then return 1 fi echo "$pinfo" | cut -d' ' -f1 | tr -d ':' } redact_arg() { sed -E "s/$1=[^ ]+/$1=REDACTED/" } redact_args() { redact_arg androidboot.serialno | \ redact_arg androidboot.wifimacaddr | \ redact_arg androidboot.btmacaddr | \ redact_arg androidboot.uid | \ redact_arg androidboot.ap_serial | \ redact_arg androidboot.cpuid | \ redact_arg LCD | \ redact_arg androidboot.id.jtag | \ redact_arg androidboot.em.did } # Add delay for error visibility on_error() { e=$? sleep 5 return $e } save_logs() { saving_logs=true # Gather system info # This is best-effort and does not strictly need to be present, so suppress errors here. set +e cat /proc/interrupts > /tmp/post_bench_interrupts.txt cat /proc/cmdline | redact_args > /tmp/cmdline.txt cat /proc/cpuinfo > /tmp/cpuinfo.txt dmesg | redact_args > /tmp/kernel.log uptime > /tmp/uptime.txt ps -A > /tmp/processes.txt echo "Kernel: $(cat /proc/version)" > /tmp/versions.txt echo "Python: $(python3 --version)" >> /tmp/versions.txt echo "Model: $(cat /sys/firmware/devicetree/base/model | tr '\0' ';')" > /tmp/device.txt echo "Compatible: $(cat /sys/firmware/devicetree/base/compatible | tr '\0' ';')" >> /tmp/device.txt mkdir /tmp/cpufreq_stats for policy in /sys/devices/system/cpu/cpufreq/policy* do pol_dir="/tmp/cpufreq_stats/$(basename "$policy" | sed 's/policy//')" mkdir "$pol_dir" # Frequency domains with too many OPPs will fail here cp "$policy/stats/"{time_in_state,total_trans,trans_table} "$pol_dir" 2> /dev/null || true done set -e mkdir /persist persist_part="$(find_part_by_name cache || find_part_by_name persist)" # We write everything to tmpfs and copy it to persist afterwards because writing to UFS will use power echo mount -o noatime "$persist_part" /persist echo 
"Writing logs and results to $OUT_DIR" rm -fr "$OUT_DIR" cp -r /tmp "$OUT_DIR" umount /persist sync # Saving logs multiple times is fine as long as we don't try to recurse saving_logs=false } try_write() { { echo "$2" > "$1"; } > /dev/null 2>&1 || true } # SSH debug over USB RNDIS set +e if $USB_DEBUG; then source /usb.sh fi set -e # Disable fbcon cursor blinking to reduce interference from its 1-second timer and memory ops try_write /sys/devices/virtual/graphics/fbcon/cursor_blink 0 # Disable hung task detection try_write /proc/sys/kernel/hung_task_timeout_secs 0 # Snapdragon: Enable cpuidle for more realistic conditions try_write /sys/module/lpm_levels/parameters/sleep_disabled 0 try_write /sys/module/msm_pm/parameters/sleep_disabled 0 # Exynos: Disable Exynos auto-hotplug to allow manual CPU control try_write /sys/power/cpuhotplug/enabled 0 try_write /sys/power/cpuhp/enabled 0 # Snapdragon: Decrease delay of qpnp_fg to 1000ms to get proper results try_write /sys/module/qpnp_fg/parameters/sram_update_period_ms 1000 # Snapdragon: Initialize aDSP for power supply on newer SoCs # On Snapdragon 888 (Qualcomm kernel 5.4) devices and newer, the DSP is # responsible for power and charging, so we need to initialize it before we can # read power usage from the fuel gauge. if uname -r | grep -q '^5\.' && grep -q Qualcomm /proc/cpuinfo; then # qrtr nameserver is required for DSP services to work properly qrtr-ns & echo "Booting DSP..." echo -n 1 > /sys/kernel/boot_adsp/boot sleep 3 if [[ "$(cat /sys/class/subsys/subsys_adsp/device/subsys*/state)" != "ONLINE" ]]; then echo "Failed to boot aDSP!" exit 1 fi fi cat /proc/interrupts > /tmp/pre_bench_interrupts.txt py_args=() if ! 
$DEBUG; then py_args+=(-OO) fi py_args+=(/bench.py "$POWER_SAMPLE_INTERVAL") time taskset 01 python3 "${py_args[@]}" 2>&1 | tee /tmp/run.log || on_error save_logs # To debug system load #htop reboot_end ================================================ FILE: mkbootimg.py ================================================ #!/usr/bin/env python # Copyright 2015, The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from argparse import ArgumentParser, FileType, Action from hashlib import sha1 from os import fstat import re from struct import pack BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096 def filesize(f): if f is None: return 0 try: return fstat(f.fileno()).st_size except OSError: return 0 def update_sha(sha, f): if f: sha.update(f.read()) f.seek(0) sha.update(pack('I', filesize(f))) else: sha.update(pack('I', 0)) def pad_file(f, padding): pad = (padding - (f.tell() & (padding - 1))) & (padding - 1) f.write(pack(str(pad) + 'x')) def get_number_of_pages(image_size, page_size): """calculates the number of pages required for the image""" return (image_size + page_size - 1) / page_size def get_recovery_dtbo_offset(args): """calculates the offset of recovery_dtbo image in the boot image""" num_header_pages = 1 # header occupies a page num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize) num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk), args.pagesize) num_second_pages = 
get_number_of_pages(filesize(args.second), args.pagesize) dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages + num_ramdisk_pages + num_second_pages) return dtbo_offset def write_header_v3(args): BOOT_IMAGE_HEADER_V3_SIZE = 1580 BOOT_MAGIC = 'ANDROID!'.encode() args.output.write(pack('8s', BOOT_MAGIC)) args.output.write(pack( '4I', filesize(args.kernel), # kernel size in bytes filesize(args.ramdisk), # ramdisk size in bytes (args.os_version << 11) | args.os_patch_level, # os version and patch level BOOT_IMAGE_HEADER_V3_SIZE)) args.output.write(pack('4I', 0, 0, 0, 0)) # reserved args.output.write(pack('I', args.header_version)) # version of bootimage header args.output.write(pack('1536s', args.cmdline.encode())) pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE) def write_vendor_boot_header(args): VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112 BOOT_MAGIC = 'VNDRBOOT'.encode() args.vendor_boot.write(pack('8s', BOOT_MAGIC)) args.vendor_boot.write(pack( '5I', args.header_version, # version of header args.pagesize, # flash page size we assume args.base + args.kernel_offset, # kernel physical load addr args.base + args.ramdisk_offset, # ramdisk physical load addr filesize(args.vendor_ramdisk))) # vendor ramdisk size in bytes args.vendor_boot.write(pack('2048s', args.vendor_cmdline.encode())) args.vendor_boot.write(pack('I', args.base + args.tags_offset)) # physical addr for kernel tags args.vendor_boot.write(pack('16s', args.board.encode())) # asciiz product name args.vendor_boot.write(pack('I', VENDOR_BOOT_IMAGE_HEADER_V3_SIZE)) # header size in bytes if filesize(args.dtb) == 0: raise ValueError("DTB image must not be empty.") args.vendor_boot.write(pack('I', filesize(args.dtb))) # size in bytes args.vendor_boot.write(pack('Q', args.base + args.dtb_offset)) # dtb physical load address pad_file(args.vendor_boot, args.pagesize) def write_header(args): BOOT_IMAGE_HEADER_V1_SIZE = 1648 BOOT_IMAGE_HEADER_V2_SIZE = 1660 BOOT_MAGIC = 'ANDROID!'.encode() if 
args.header_version > 3: raise ValueError('Boot header version %d not supported' % args.header_version) elif args.header_version == 3: return write_header_v3(args) args.output.write(pack('8s', BOOT_MAGIC)) final_ramdisk_offset = (args.base + args.ramdisk_offset) if filesize(args.ramdisk) > 0 else 0 final_second_offset = (args.base + args.second_offset) if filesize(args.second) > 0 else 0 args.output.write(pack( '10I', filesize(args.kernel), # size in bytes args.base + args.kernel_offset, # physical load addr filesize(args.ramdisk), # size in bytes final_ramdisk_offset, # physical load addr filesize(args.second), # size in bytes final_second_offset, # physical load addr args.base + args.tags_offset, # physical addr for kernel tags args.pagesize, # flash page size we assume args.header_version, # version of bootimage header (args.os_version << 11) | args.os_patch_level)) # os version and patch level args.output.write(pack('16s', args.board.encode())) # asciiz product name args.output.write(pack('512s', args.cmdline[:512].encode())) sha = sha1() update_sha(sha, args.kernel) update_sha(sha, args.ramdisk) update_sha(sha, args.second) if args.header_version > 0: update_sha(sha, args.recovery_dtbo) if args.header_version > 1: update_sha(sha, args.dtb) img_id = pack('32s', sha.digest()) args.output.write(img_id) args.output.write(pack('1024s', args.cmdline[512:].encode())) if args.header_version > 0: args.output.write(pack('I', filesize(args.recovery_dtbo))) # size in bytes if args.recovery_dtbo: args.output.write(pack('Q', get_recovery_dtbo_offset(args))) # recovery dtbo offset else: args.output.write(pack('Q', 0)) # Will be set to 0 for devices without a recovery dtbo # Populate boot image header size for header versions 1 and 2. 
    # Tail of write_header() (the start of the function lies before this chunk).
    # v1/v2 headers append their own total size; v2 additionally appends the
    # DTB size and physical load address.
    if args.header_version == 1:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
    elif args.header_version == 2:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))

    if args.header_version > 1:
        if filesize(args.dtb) == 0:
            raise ValueError("DTB image must not be empty.")
        args.output.write(pack('I', filesize(args.dtb)))  # size in bytes
        args.output.write(pack('Q', args.base + args.dtb_offset))  # dtb physical load address
    # The header occupies a whole page; payload sections start page-aligned.
    pad_file(args.output, args.pagesize)
    return img_id


class ValidateStrLenAction(Action):
    """argparse Action that stores a string argument, rejecting values longer than maxlen."""

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # 'maxlen' is our custom keyword; it must be stripped before delegating
        # to argparse.Action, which rejects unknown kwargs.
        if 'maxlen' not in kwargs:
            raise ValueError('maxlen must be set')
        self.maxlen = int(kwargs['maxlen'])
        del kwargs['maxlen']
        super(ValidateStrLenAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Enforce the fixed-size string fields of the boot image header.
        if len(values) > self.maxlen:
            raise ValueError(
                'String argument too long: max {0:d}, got {1:d}'.format(self.maxlen, len(values)))
        setattr(namespace, self.dest, values)


def write_padded_file(f_out, f_in, padding):
    """Copy f_in into f_out and pad the output up to a multiple of `padding` bytes.

    A None input is skipped silently (optional image sections).
    """
    if f_in is None:
        return
    f_out.write(f_in.read())
    pad_file(f_out, padding)


def parse_int(x):
    # Base 0 auto-detects the prefix: decimal, 0x..., 0o..., 0b...
    return int(x, 0)


def parse_os_version(x):
    """Pack an 'A[.B[.C]]' OS version string into 7 bits per field.

    Returns 0 when x does not start with a version number.
    """
    match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
    if match:
        a = int(match.group(1))
        b = c = 0
        if match.lastindex >= 2:
            b = int(match.group(2))
        if match.lastindex == 3:
            c = int(match.group(3))
        # 7 bits allocated for each field
        assert a < 128
        assert b < 128
        assert c < 128
        return (a << 14) | (b << 7) | c
    return 0


def parse_os_patch_level(x):
    """Pack a 'YYYY-MM[-DD]' patch level into 7 bits of year (since 2000) + 4 bits of month.

    The day component, if present, is ignored. Returns 0 when x does not match.
    """
    match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
    if match:
        y = int(match.group(1)) - 2000
        m = int(match.group(2))
        # 7 bits allocated for the year, 4 bits for the month
        assert 0 <= y < 128
        assert 0 < m <= 12
        return (y << 4) | m
    return 0


def parse_cmdline():
    """Build the mkbootimg argument parser and parse sys.argv."""
    parser = ArgumentParser()
    parser.add_argument('--kernel', help='path to the kernel',
                        type=FileType('rb'))
    parser.add_argument('--ramdisk', help='path to the ramdisk',
                        type=FileType('rb'))
    parser.add_argument('--second', help='path to the 2nd bootloader',
                        type=FileType('rb'))
    parser.add_argument('--dtb', help='path to dtb', type=FileType('rb'))
    # --recovery_dtbo and --recovery_acpio are aliases for the same dest
    # (args.recovery_dtbo); at most one may be supplied.
    recovery_dtbo_group = parser.add_mutually_exclusive_group()
    recovery_dtbo_group.add_argument('--recovery_dtbo',
                                     help='path to the recovery DTBO',
                                     type=FileType('rb'))
    recovery_dtbo_group.add_argument('--recovery_acpio',
                                     help='path to the recovery ACPIO',
                                     type=FileType('rb'),
                                     metavar='RECOVERY_ACPIO',
                                     dest='recovery_dtbo')
    parser.add_argument('--cmdline', help='extra arguments to be passed on the '
                        'kernel command line', default='',
                        action=ValidateStrLenAction, maxlen=1536)
    parser.add_argument('--vendor_cmdline',
                        help='kernel command line arguments contained in vendor boot',
                        default='', action=ValidateStrLenAction, maxlen=2048)
    parser.add_argument('--base', help='base address', type=parse_int,
                        default=0x10000000)
    parser.add_argument('--kernel_offset', help='kernel offset', type=parse_int,
                        default=0x00008000)
    parser.add_argument('--ramdisk_offset', help='ramdisk offset',
                        type=parse_int, default=0x01000000)
    parser.add_argument('--second_offset', help='2nd bootloader offset',
                        type=parse_int, default=0x00f00000)
    parser.add_argument('--dtb_offset', help='dtb offset', type=parse_int,
                        default=0x01f00000)
    parser.add_argument('--os_version', help='operating system version',
                        type=parse_os_version, default=0)
    parser.add_argument('--os_patch_level', help='operating system patch level',
                        type=parse_os_patch_level, default=0)
    parser.add_argument('--tags_offset', help='tags offset', type=parse_int,
                        default=0x00000100)
    parser.add_argument('--board', help='board name', default='',
                        action=ValidateStrLenAction, maxlen=16)
    # Page sizes 2048..16384, powers of two only.
    parser.add_argument('--pagesize', help='page size', type=parse_int,
                        choices=[2**i for i in range(11, 15)], default=2048)
    parser.add_argument('--id', help='print the image ID on standard output',
                        action='store_true')
    parser.add_argument('--header_version', help='boot image header version',
                        type=parse_int, default=0)
    parser.add_argument('-o', '--output', help='output file name',
                        type=FileType('wb'))
    parser.add_argument('--vendor_boot', help='vendor boot output file name',
                        type=FileType('wb'))
    parser.add_argument('--vendor_ramdisk', help='path to the vendor ramdisk',
                        type=FileType('rb'))
    return parser.parse_args()


def write_data(args, pagesize):
    """Write the page-padded payload sections that follow the boot image header."""
    write_padded_file(args.output, args.kernel, pagesize)
    write_padded_file(args.output, args.ramdisk, pagesize)
    write_padded_file(args.output, args.second, pagesize)

    # Recovery DTBO/ACPIO exists only in header v1/v2; the DTB only in v2.
    if args.header_version > 0 and args.header_version < 3:
        write_padded_file(args.output, args.recovery_dtbo, pagesize)
    if args.header_version == 2:
        write_padded_file(args.output, args.dtb, pagesize)


def write_vendor_boot_data(args):
    """Write the vendor ramdisk and DTB payloads of a vendor_boot image."""
    write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
    write_padded_file(args.vendor_boot, args.dtb, args.pagesize)


def main():
    args = parse_cmdline()
    if args.vendor_boot is not None:
        # vendor_boot only exists from header v3 on, and requires a ramdisk.
        if args.header_version < 3:
            raise ValueError('--vendor_boot not compatible with given header version')
        if args.vendor_ramdisk is None:
            raise ValueError('--vendor_ramdisk missing or invalid')
        write_vendor_boot_header(args)
        write_vendor_boot_data(args)
    if args.output is not None:
        if args.kernel is None:
            raise ValueError('kernel must be supplied when creating a boot image')
        if args.second is not None and args.header_version > 2:
            raise ValueError('--second not compatible with given header version')
        img_id = write_header(args)
        if args.header_version > 2:
            # v3+ uses a fixed page size; --pagesize applies to older headers.
            write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
        else:
            write_data(args, args.pagesize)
        if args.id and img_id is not None:
            # Python 2's struct.pack returns a string, but py3 returns bytes.
            if isinstance(img_id, str):
                # Convert a py2 str to a list of byte values so the hex
                # formatting below behaves identically on py2 and py3.
                img_id = [ord(x) for x in img_id]
            print('0x' + ''.join('{:02x}'.format(c) for c in img_id))


if __name__ == '__main__':
    main()



================================================
FILE: pack-img.sh
================================================
#!/usr/bin/env bash
# Build a flashable freqbench boot image: repack the ramdisk, then wrap the
# Alpine kernel + DTB + new ramdisk into an Android boot image (header v2).
# Output path: first argument, or bench.img in the caller's directory.

oldwd="$PWD"
cd "$(dirname "$0")"

./packrd-gz.sh
python mkbootimg.py \
    --header_version 2 \
    --os_version 11.0.0 \
    --os_patch_level 2020-11 \
    --ramdisk rd-new.cpio.gz \
    --kernel alpine-Image.lz4 \
    --dtb alpine-dt.dtb \
    --cmdline 'console=ttyMSM0,115200n8 androidboot.console=ttyMSM0 printk.devkmsg=on msm_rtb.filter=0x237 ehci-hcd.park=3 service_locator.enable=1 androidboot.memcg=1 cgroup.memory=nokmem lpm_levels.sleep_disabled=1 usbcore.autosuspend=7 androidboot.usbcontroller=a600000.dwc3 swiotlb=2048 androidboot.boot_devices=soc/1d84000.ufshc loop.max_part=7 snd_soc_cs35l41_i2c.async_probe=1 i2c_qcom_geni.async_probe=1 st21nfc.async_probe=1 spmi_pmic_arb.async_probe=1 ufs_qcom.async_probe=1 buildvariant=user' \
    --kernel_offset 0x8000 \
    --ramdisk_offset 0x1000000 \
    --dtb_offset 0x1f00000 \
    --tags_offset 0x100 \
    --pagesize 4096 \
    --output "$oldwd/${1:-bench.img}"


================================================
FILE: pack-zip.sh
================================================
#!/usr/bin/env bash
# Build the AnyKernel3 flashable installer zip: repack the ramdisk, drop it
# into the anykernel/ template, and zip it (store-only, -0) into the caller's
# directory as freqbench-installer.zip.

oldwd="$PWD"
cd "$(dirname "$0")"

./packrd-gz.sh

pushd anykernel
cp ../rd-new.cpio.gz .
rm -f "$oldwd/freqbench-installer.zip"
zip -r0 "$oldwd/freqbench-installer.zip" .
popd


================================================
FILE: packrd-gz.sh
================================================
#!/usr/bin/env bash
# Rebuild rd-new.cpio.gz from the rd/ tree, refreshing the scripts that are
# copied in from the repository root first.

set -eufo pipefail
cd "$(dirname "$0")"

# Prefer pigz (parallel gzip) when available for faster compression.
if command -v pigz > /dev/null 2>&1; then
    GZIP="pigz"
else
    GZIP="gzip"
fi

cp -af init.sh rd/init
cp -af config.sh usb.sh bench.py dhcpd.conf rd/
# Empty directories are not tracked by git; recreate the expected FHS layout.
mkdir -p rd/{tmp,sys,srv,run,root,proc,opt,mnt,home,dev}
mkdir -p rd/var/{tmp,opt,mail,log,local,empty}

oldwd="$PWD"
cd rd
find . | cpio -o -H newc | "$GZIP" -9c > "$oldwd/rd-new.cpio.gz"


================================================
FILE: postprocess/cross_cpu_cluster_graph.py
================================================
#!/usr/bin/env python3
# Plot one result column per CPU cluster across one or more SoC result files.
#
# Argument mini-language (any order):
#   name:path  load result JSON `path`, labeling its curves with `name`
#   +flag      enable a flag: "minscl" subtracts each curve's own minimum,
#              "soccolor" colors all curves of a SoC with the same color
#   load/col   select the measurement set (default "active") and column
#   col        bare word: just the column name

import json
import csv
import sys

import matplotlib.pyplot as plt

# Display names for the first CPU of each cluster; others fall back to "CPU n".
CPU_LABELS = {
    1: "Little",
    4: "Big",
    6: "Big",
    7: "Prime"
}

# Axis/title labels for known result columns; unknown columns use the raw name.
COL_LABELS = {
    "power_mean": "Power (mW)",
    "coremark_score": "Performance (iter/s)",
    "energy_joules": "Energy (J)",
    "energy_millijoules": "Energy (mJ)",
    "elapsed_sec": "Time (s)",
    "coremarks_per_mhz": "CoreMarks/MHz",
    "ulpmark_cm_score": "ULPMark-CM (iter/mJ)",
}

flags = set()
socs = {}
freq_load = "active"
col_name = None
for i, arg in enumerate(sys.argv[1:]):
    if ":" in arg:
        name, path = arg.split(":")
        with open(path, "r") as f:
            socs[name] = json.loads(f.read())
    elif "+" in arg:
        flag = arg[1:]
        flags.add(flag)
    elif "/" in arg:
        freq_load, col_name = arg.split("/")
    else:
        col_name = arg

col_label = COL_LABELS[col_name] if col_name in COL_LABELS else col_name
plt.ylabel(col_label)
plt.xlabel("Frequency (MHz)")
plt.title(col_label)

for soc_i, (soc, soc_data) in enumerate(socs.items()):
    cpus_data = soc_data["cpus"]
    for cpu, cpu_data in cpus_data.items():
        cpu = int(cpu)

        # JSON keys are kHz strings; plot in MHz.
        freqs = [int(freq) / 1000 for freq in cpu_data["freqs"].keys()]
        raw_values = [freq_data[freq_load][col_name] for freq_data in cpu_data["freqs"].values()]
        values = []
        for freq, freq_data in cpu_data["freqs"].items():
            if "minscl" in flags:
                # min(raw_values) is loop-invariant here; recomputed per freq.
                curv = freq_data[freq_load][col_name]
                minv = min(raw_values)
                values.append(curv - minv)
            else:
                values.append(freq_data[freq_load][col_name])

        cpu_label = CPU_LABELS[cpu] if cpu in CPU_LABELS else f"CPU {cpu}"
        val_label = f"{soc} {cpu_label}"
        color = f"C{soc_i}"
        if "soccolor" in flags:
            plt.plot(freqs, values, color, label=val_label)
        else:
            plt.plot(freqs, values, label=val_label)

plt.legend()
plt.show()


================================================
FILE: postprocess/cross_cpu_voltage_graph.py
================================================
#!/usr/bin/env python3
# Plot CPU voltage vs. frequency curves for one or more SoCs.
#
# Arguments (any order):
#   name:path  load a voltage table file; each whitespace-separated token is
#              "cpu.freq=voltage" (split on '.' and '='), labeled `name`
#   +flag      enable a flag ("soccolor" colors all curves of a SoC alike)

import sys
import matplotlib.pyplot as plt
import re
import collections

# Display names for the first CPU of each cluster; others fall back to "CPU n".
CPU_LABELS = {
    1: "Little",
    4: "Big",
    6: "Big",
    7: "Prime"
}

flags = set()
socs = {}
freq_load = "active"
col_name = None
for i, arg in enumerate(sys.argv[1:]):
    if ":" in arg:
        name, path = arg.split(":")
        with open(path, "r") as f:
            socs[name] = [[int(v) for v in re.split(r"[\.=]", opp)] for opp in f.read().strip().split(" ")]
    elif "+" in arg:
        flag = arg[1:]
        flags.add(flag)

plt.ylabel("Voltage (mV)")
plt.xlabel("Frequency (MHz)")
plt.title("CPU Voltages")

for soc_i, (soc, soc_data) in enumerate(socs.items()):
    # Group the flat (cpu, freq, volt) tuples into one curve per CPU.
    cpu_freqs = collections.defaultdict(list)
    cpu_volts = collections.defaultdict(list)

    for cpu, freq, volt in soc_data:
        # kHz -> MHz, µV -> mV
        freq /= 1000
        volt /= 1000

        cpu_freqs[cpu].append(freq)
        cpu_volts[cpu].append(volt)

    for cpu, freqs in cpu_freqs.items():
        volts = cpu_volts[cpu]

        cpu_label = CPU_LABELS[cpu] if cpu in CPU_LABELS else f"CPU {cpu}"
        val_label = f"{soc} {cpu_label}"
        color = f"C{soc_i}"
        if "soccolor" in flags:
            plt.plot(freqs, volts, color, label=val_label)
        else:
            plt.plot(freqs, volts, label=val_label)

plt.legend()
plt.show()


================================================
FILE: postprocess/efficient_freqs.py
================================================
#!/usr/bin/env python3
# Prune a result JSON (argv[1]) down to "efficient" frequencies and write the
# result to argv[2]: keep the most efficient OPP (by ULPMark-CM score), the
# maximum frequency, and intermediate frequencies that pass the filters below.

import json
import sys

with open(sys.argv[1], "r") as f:
    json_data = json.loads(f.read())

cpus_data = json_data["cpus"]
for cpu, cpu_data in cpus_data.items():
    cpu = int(cpu)
    print(f"cpu{cpu}:")
    eff_freqs = set()

    # Start with the most efficient freq
    freqs = cpu_data["freqs"]
    max_eff_freq, max_eff = max(
        ((int(freq), freq_data["active"]["ulpmark_cm_score"]) for freq, freq_data in freqs.items()),
        key=lambda opp: opp[1]
    )
    print((max_eff_freq, max_eff))
    eff_freqs.add(max_eff_freq)

    # Add the max freq
    max_freq = max(int(freq) for freq in freqs.keys())
    max_freq_eff = freqs[str(max_freq)]["active"]["ulpmark_cm_score"]
    eff_freqs.add(max_freq)

    # Add efficient intermediate freqs
    last_freq = max_eff_freq
    freq_keys = list(map(int, freqs.keys()))
    for freq_i, (freq, freq_data) in enumerate(freqs.items()):
        freq = int(freq)
        eff = freq_data["active"]["ulpmark_cm_score"]

        # Clock compensation: if 500 MHz passed with no freq step
        if freq - last_freq < 500000:
            # Ignore freqs slower than most efficient
            if freq < max_eff_freq:
                continue

            # Less efficient than max freq
            if eff < max_freq_eff:
                continue

            # Less efficient than next freq
            #next_freq = freq_keys[min(freq_keys.index(freq) + 1, len(freqs) - 1)]
            #if freqs[str(next_freq)]["active"]["ulpmark_cm_score"] >= eff:
            #    continue

        last_freq = freq
        eff_freqs.add(freq)
        print(freq)

    # Remove inefficient freqs
    # NOTE(review): freqs.keys() are JSON strings while eff_freqs holds ints,
    # so this set difference is every key, i.e. ALL freqs get deleted below
    # (cf. filter_freqs.py, which converts with str() first) — verify intent.
    ineff_freqs = freqs.keys() - eff_freqs
    for freq in ineff_freqs:
        del freqs[str(freq)]

    print()

with open(sys.argv[2], "w+") as f:
    f.write(json.dumps(json_data))


================================================
FILE: postprocess/filter_freqs.py
================================================
#!/usr/bin/env python3
# Keep only whitelisted OPPs in a result JSON. argv[1] = input JSON,
# argv[2] = output JSON, argv[3:] = allowed OPPs as "cpu.freq" tokens.

import json
import sys

with open(sys.argv[1], "r") as f:
    json_data = json.loads(f.read())

# Parse "cpu.freq" tokens into (cpu, freq) int tuples.
allowed_opps = set(tuple(int(v) for v in opp.split(".")) for opp in sys.argv[3:])

cpus_data = json_data["cpus"]
for cpu, cpu_data in cpus_data.items():
    cpu = int(cpu)
    freqs = cpu_data["freqs"]
    # Drop every freq key not whitelisted for this CPU.
    remove_freqs = freqs.keys() - set(str(freq) for opp_cpu, freq in allowed_opps if opp_cpu == cpu)
    for freq in remove_freqs:
        del freqs[str(freq)]

with open(sys.argv[2], "w+") as f:
    f.write(json.dumps(json_data))


================================================
FILE: postprocess/idle_csv.py
================================================
#!/usr/bin/env python3
# Flatten the per-CPU, per-frequency "idle" measurements of a result JSON
# (argv[1]) into a CSV file (argv[2]).

import json
import csv
import sys

with open(sys.argv[1], "r") as f:
    json_data = json.loads(f.read())

cpus_data = json_data["cpus"]
with open(sys.argv[2], "w+") as f:
    fields = [
        "CPU",
        "Frequency (kHz)",
        "Power (mW)",
        "Energy (J)"
    ]

    writer = csv.DictWriter(f, fieldnames=fields)
    writer.writeheader()

    for cpu, cpu_data in cpus_data.items():
        for freq, freq_data in cpu_data["freqs"].items():
            # Only the idle measurement set is exported here.
            freq_data = freq_data["idle"]
            writer.writerow({
                "CPU": cpu,
                "Frequency (kHz)": freq,
                "Power (mW)": freq_data["power_mean"],
                "Energy (J)": freq_data["energy_joules"],
            })


================================================
FILE: postprocess/legacy_energy_model.py
================================================
#!/usr/bin/env python3
# Generate a legacy EAS energy model DTS fragment from a result JSON.
#   argv[1]          result JSON
#   argv[2] (opt)    "key/value": key type "freq" or "cap", value type
#                    "power" or "energy" (default "freq/power")
#   argv[3] (opt)    existing DTSI whose idle costs and cluster busy costs
#                    are carried over into the generated model

import json
import sys
import re
import statistics

with open(sys.argv[1], "r") as f:
    json_data = json.loads(f.read())

if len(sys.argv) > 2:
    key_type, value_type = sys.argv[2].split("/")
else:
    key_type = "freq"
    value_type = "power"

if len(sys.argv) > 3:
    old_model = {"core": [], "cluster": []}
    # Example:
    # {
    #     "core": [
    #         {
    #             "busy": [1, 2, 3, 4, 5, 6],
    #             "idle": [3, 2, 1],
    #         },
    #         {
    #             "busy": [10, 20, 30, 40, 50, 60],
    #             "idle": [5, 3, 2],
    #         },
    #     ],
    #     "cluster": [
    #         {
    #             "busy": [1, 1, 1, 2, 3, 3],
    #             "idle": [2, 2, 1],
    #         },
    #         {
    #             "busy": [2, 2, 3, 4, 4, 5],
    #             "idle": [4, 2, 1],
    #         },
    #     ],
    # }
    with open(sys.argv[3], "r") as f:
        old_dtsi = f.read().split("\n")

    # Rudimentary line-by-line DTS parser, will break with unexpected data
    cpu_i = -1
    data_block = None
    cost_block = None
    for line in old_dtsi:
        match = re.search(r"(core|cluster)-cost(\d+)\s+\{", line)
        if match:
            # New core-costN / cluster-costN node: advance or reset the index.
            new_data_block = match.group(1)
            if new_data_block == data_block:
                cpu_i += 1
            else:
                cpu_i = 0
            data_block = new_data_block
            old_model[data_block].append({})
            continue

        match = re.search(r"(busy|idle)-cost-data\s+=", line)
        if match:
            cost_block = match.group(1)
            old_model[data_block][cpu_i][cost_block] = []
            continue

        if cost_block == "busy":
            match = re.search(r"^\s*(\d+)\s+(\d+)\s*$", line)
            if match:
                key = int(match.group(1))
                value = int(match.group(2))
                # Ignore keys (cap/freq) and use indices instead
                # Assumption: all freqs are present in both
                old_model[data_block][cpu_i]["busy"].append(value)
        elif cost_block == "idle":
            if re.match(r"^\s*(?:\d+\s*)+$", line):
                # Extend array to accommodate single-line costs, e.g. qcom format
                idle_costs = [int(cost) for cost in re.split(r"\s+", line.strip())]
                old_model[data_block][cpu_i]["idle"] += idle_costs

        # End of a cost-data property value.
        if re.match(r"^\s*>;\s*$", line):
            cost_block = None
else:
    old_model = None

cpus_data = json_data["cpus"]

DTS_HEADER = """/*
 * Auto-generated legacy EAS energy model for incorporation in SoC device tree.
 * Generated by freqbench postprocessing scripts using freqbench results.
 * More info at https://github.com/kdrag0n/freqbench
 */

/ {
\tcpus {"""

print(DTS_HEADER, end="")

# Performance efficiency
unscaled_cpu_cm_mhz = {}
for cpu, cpu_data in cpus_data.items():
    # NOTE(review): freq keys are JSON strings, so this max is lexicographic,
    # not numeric (e.g. "998400" > "1804800") — verify intended "last freq".
    last_freq, last_freq_data = max(cpu_data["freqs"].items(), key=lambda f: f[0])
    cm_mhz = last_freq_data["active"]["coremarks_per_mhz"]
    unscaled_cpu_cm_mhz[int(cpu)] = cm_mhz

# Scale performance efficiency
max_cm_mhz = max(unscaled_cpu_cm_mhz.values())
scaled_cpu_cm_mhz = {
    cpu: cm_mhz / max_cm_mhz * 1024
    for cpu, cm_mhz in unscaled_cpu_cm_mhz.items()
}

# Pass 1: performance efficiency (for capacity scaling)
for cpu, cpu_data in cpus_data.items():
    cpu = int(cpu)
    cm_mhz_norm = scaled_cpu_cm_mhz[cpu]
    # f-strings cannot contain literal braces; emit them via lb/rb.
    lb = "{"
    rb = "}"
    print(f"""
\t\tcpu@{0 if cpu == 1 else cpu} {lb}
\t\t\tefficiency = <{cm_mhz_norm:.0f}>;
\t\t\tcapacity-dmips-mhz = <{cm_mhz_norm:.0f}>;
\t\t{rb};""")

print("""\t};
\tenergy_costs: energy-costs {
\t\tcompatible = "sched-energy";""")

max_perf = max(
    max(freq["active"]["coremark_score"] for freq in cpu["freqs"].values())
    for cpu in cpus_data.values()
)

# Pass 2: core costs
core_cost_keys = []
for cpu_i, (cpu, cpu_data) in enumerate(cpus_data.items()):
    cpu = int(cpu)
    core_cost_keys.append([])

    lb = "{"
    rb = "}"
    print(f"""
\t\tCPU_COST_{cpu_i}: core-cost{cpu_i} {lb}
\t\t\tbusy-cost-data = <""")

    for freq, freq_data in cpu_data["freqs"].items():
        freq = int(freq)

        if value_type == "power":
            value = freq_data["active"]["power_mean"]
        elif value_type == "energy":
            value = freq_data["active"]["energy_millijoules"]

        if key_type == "freq":
            key = freq
            print(f"\t\t\t\t{key: 8.0f}{value: 5.0f}")
        elif key_type == "cap":
            # Floor to match CPU integer math
            key = freq_data["active"]["coremark_score"] / max_perf * 1024
            print(f"\t\t\t\t{key: 5.0f}{value: 5.0f}")

        core_cost_keys[cpu_i].append(key)

    if old_model:
        idle_costs = " ".join(map(str, old_model["core"][cpu_i]["idle"]))
    else:
        # Placeholder in lieu of real data
        idle_costs = "3 2 1"

    print(f"""\t\t\t>;
\t\t\tidle-cost-data = <
\t\t\t\t{idle_costs}
\t\t\t>;
\t\t{rb};""")

# Pass 3: cluster costs
if old_model:
    for cpu_i, new_keys in enumerate(core_cost_keys):
        lb = "{"
        rb = "}"
        print(f"""
\t\tCLUSTER_COST_{cpu_i}: cluster-cost{cpu_i} {lb}
\t\t\tbusy-cost-data = <""")

        for cost_i, cost in enumerate(old_model["cluster"][cpu_i]["busy"]):
            # Ignore silently for now instead of logging to stderr to make copy-pasting easier
            # This happens with qcom speed bin differences on newer SoCs
            if cost_i >= len(new_keys):
                continue

            key = new_keys[cost_i]
            print(f"\t\t\t\t{key: 5.0f}{cost: 5.0f}")

        idle_costs = " ".join(map(str, old_model["cluster"][cpu_i]["idle"]))
        print(f"""\t\t\t>;
\t\t\tidle-cost-data = <
\t\t\t\t{idle_costs}
\t\t\t>;
\t\t{rb};""")

print("""\t};
};""")


================================================
FILE: postprocess/requirements.txt
================================================
matplotlib


================================================
FILE: postprocess/simplified_energy_model.py
================================================
#!/usr/bin/env python3
# Generate a simplified EAS energy model (dynamic-power-coefficient based)
# DTS fragment from a result JSON (argv[1]); argv[2:] supply per-OPP voltages
# as "cluster.freq=voltage" tokens.

import json
import csv
import sys
import re
import statistics

with open(sys.argv[1], "r") as f:
    json_data = json.loads(f.read())

cpus_data = json_data["cpus"]

DTS_HEADER = """/*
 * Auto-generated simplified EAS energy model for incorporation in SoC device tree.
 * Generated by freqbench postprocessing scripts using freqbench results.
* More info at https://github.com/kdrag0n/freqbench */ / { \tcpus {""" print(DTS_HEADER) mode = "power" voltages = {} for arg in sys.argv[2:]: cluster, freq, voltage = map(int, re.split(r"\.|=", arg)) voltages[(cluster, freq)] = voltage # Performance efficiency unscaled_cpu_cm_mhz = {} for cpu, cpu_data in cpus_data.items(): last_freq, last_freq_data = max(cpu_data["freqs"].items(), key=lambda f: f[0]) cm_mhz = last_freq_data["active"]["coremarks_per_mhz"] unscaled_cpu_cm_mhz[int(cpu)] = cm_mhz # Scale performance efficiency max_cm_mhz = max(unscaled_cpu_cm_mhz.values()) scaled_cpu_cm_mhz = { cpu: cm_mhz / max_cm_mhz * 1024 for cpu, cm_mhz in unscaled_cpu_cm_mhz.items() } for cpu, cpu_data in cpus_data.items(): cpu = int(cpu) dpcs = [] for freq, freq_data in cpu_data["freqs"].items(): freq = int(freq) if (cpu, freq) not in voltages: continue if mode == "power": # µW cost = freq_data["active"]["power_mean"] * 1000 elif mode == "energy": cost = freq_data["active"]["energy_millijoules"] * 10 mhz = freq / 1000 v = voltages[(cpu, freq)] / 1_000_000 dpc = cost / mhz / v**2 dpcs.append(dpc) cm_mhz_norm = scaled_cpu_cm_mhz[cpu] if dpcs: dpc = statistics.mean(dpcs) else: dpc = 0 lb = "{" rb = "}" print(f"""\t\tcpu@{0 if cpu == 1 else cpu} {lb} \t\t\tefficiency = <{cm_mhz_norm:.0f}>; \t\t\tcapacity-dmips-mhz = <{cm_mhz_norm:.0f}>; \t\t\tdynamic-power-coefficient = <{dpc:.0f}>; \t\t{rb}; """) print("""\t}; };""") ================================================ FILE: postprocess/unified_cluster_col.py ================================================ #!/usr/bin/env python3 import json import csv import sys with open(sys.argv[1], "r") as f: json_data = json.loads(f.read()) col_name = sys.argv[2] cpus_data = json_data["cpus"] with open(sys.argv[3], "w+") as f: fields = [ "Frequency (kHz)", *[f"CPU {cpu} {col_name}" for cpu in cpus_data.keys()] ] writer = csv.DictWriter(f, fieldnames=fields) writer.writeheader() freqs = [] for cpu, cpu_data in cpus_data.items(): freqs += 
cpu_data["freqs"].keys() freqs.sort(reverse=True) for freq in freqs: row = { "Frequency (kHz)": freq } for cpu, cpu_data in cpus_data.items(): if freq in cpu_data["freqs"]: row[f"CPU {cpu} {col_name}"] = str(cpu_data["freqs"][freq]["active"][col_name]) else: row[f"CPU {cpu} {col_name}"] = "" writer.writerow(row) ================================================ FILE: postprocess/unified_cluster_graph.py ================================================ #!/usr/bin/env python3 import json import csv import sys import matplotlib.pyplot as plt with open(sys.argv[1], "r") as f: json_data = json.loads(f.read()) CPU_LABELS = { 1: "Little", 4: "Big", 6: "Big", 7: "Prime" } COL_LABELS = { "power_mean": "Power (mW)", "coremark_score": "Performance (iter/s)", "energy_joules": "Energy (J)", "energy_millijoules": "Energy (mJ)", "elapsed_sec": "Time (s)", "coremarks_per_mhz": "CoreMarks/MHz", "ulpmark_cm_score": "ULPMark-CM (iter/mJ)", } col_name = sys.argv[2] cpus_data = json_data["cpus"] col_label = COL_LABELS[col_name] if col_name in COL_LABELS else col_name plt.ylabel(col_label) plt.xlabel("Frequency (MHz)") if len(sys.argv) > 3: plt.title(sys.argv[3]) else: plt.title(col_label) for cpu, cpu_data in cpus_data.items(): cpu = int(cpu) freqs = [int(freq) / 1000 for freq in cpu_data["freqs"].keys()] values = [freq_data["active"][col_name] for freq_data in cpu_data["freqs"].values()] cpu_label = CPU_LABELS[cpu] if cpu in CPU_LABELS else f"CPU {cpu}" plt.plot(freqs, values, label=cpu_label) plt.legend() plt.show() ================================================ FILE: rd/dev/null ================================================ ================================================ FILE: rd/etc/alpine-release ================================================ 3.12.0 ================================================ FILE: rd/etc/apk/arch ================================================ aarch64 ================================================ FILE: 
rd/etc/apk/keys/alpine-devel@lists.alpinelinux.org-524d27bb.rsa.pub ================================================ -----BEGIN PUBLIC KEY----- MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr8s1q88XpuJWLCZALdKj lN8wg2ePB2T9aIcaxryYE/Jkmtu+ZQ5zKq6BT3y/udt5jAsMrhHTwroOjIsF9DeG e8Y3vjz+Hh4L8a7hZDaw8jy3CPag47L7nsZFwQOIo2Cl1SnzUc6/owoyjRU7ab0p iWG5HK8IfiybRbZxnEbNAfT4R53hyI6z5FhyXGS2Ld8zCoU/R4E1P0CUuXKEN4p0 64dyeUoOLXEWHjgKiU1mElIQj3k/IF02W89gDj285YgwqA49deLUM7QOd53QLnx+ xrIrPv3A+eyXMFgexNwCKQU9ZdmWa00MjjHlegSGK8Y2NPnRoXhzqSP9T9i2HiXL VQIDAQAB -----END PUBLIC KEY----- ================================================ FILE: rd/etc/apk/keys/alpine-devel@lists.alpinelinux.org-58199dcc.rsa.pub ================================================ -----BEGIN PUBLIC KEY----- MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3v8/ye/V/t5xf4JiXLXa hWFRozsnmn3hobON20GdmkrzKzO/eUqPOKTpg2GtvBhK30fu5oY5uN2ORiv2Y2ht eLiZ9HVz3XP8Fm9frha60B7KNu66FO5P2o3i+E+DWTPqqPcCG6t4Znk2BypILcit wiPKTsgbBQR2qo/cO01eLLdt6oOzAaF94NH0656kvRewdo6HG4urbO46tCAizvCR CA7KGFMyad8WdKkTjxh8YLDLoOCtoZmXmQAiwfRe9pKXRH/XXGop8SYptLqyVVQ+ tegOD9wRs2tOlgcLx4F/uMzHN7uoho6okBPiifRX+Pf38Vx+ozXh056tjmdZkCaV aQIDAQAB -----END PUBLIC KEY----- ================================================ FILE: rd/etc/apk/repositories ================================================ http://dl-cdn.alpinelinux.org/alpine/v3.12/main http://dl-cdn.alpinelinux.org/alpine/v3.12/community ================================================ FILE: rd/etc/apk/world ================================================ alpine-baselayout alpine-keys apk-tools bash busybox dhcp dropbear htop libc-utils python3 sgdisk util-linux ================================================ FILE: rd/etc/crontabs/root ================================================ # do daily/weekly/monthly maintenance # min hour day month weekday command */15 * * * * run-parts /etc/periodic/15min 0 * * * * run-parts /etc/periodic/hourly 0 2 * * * run-parts /etc/periodic/daily 0 3 * * 6 run-parts 
/etc/periodic/weekly 0 5 1 * * run-parts /etc/periodic/monthly ================================================ FILE: rd/etc/dhcp/dhcpd.conf.example ================================================ # dhcpd.conf # # Sample configuration file for ISC dhcpd # # option definitions common to all supported networks... option domain-name "example.org"; option domain-name-servers ns1.example.org, ns2.example.org; default-lease-time 600; max-lease-time 7200; # Use this to enble / disable dynamic dns updates globally. #ddns-update-style none; # If this DHCP server is the official DHCP server for the local # network, the authoritative directive should be uncommented. #authoritative; # Use this to send dhcp log messages to a different log file (you also # have to hack syslog.conf to complete the redirection). log-facility local7; # No service will be given on this subnet, but declaring it helps the # DHCP server to understand the network topology. subnet 10.152.187.0 netmask 255.255.255.0 { } # This is a very basic subnet declaration. subnet 10.254.239.0 netmask 255.255.255.224 { range 10.254.239.10 10.254.239.20; option routers rtr-239-0-1.example.org, rtr-239-0-2.example.org; } # This declaration allows BOOTP clients to get dynamic addresses, # which we don't really recommend. subnet 10.254.239.32 netmask 255.255.255.224 { range dynamic-bootp 10.254.239.40 10.254.239.60; option broadcast-address 10.254.239.31; option routers rtr-239-32-1.example.org; } # A slightly different configuration for an internal subnet. subnet 10.5.5.0 netmask 255.255.255.224 { range 10.5.5.26 10.5.5.30; option domain-name-servers ns1.internal.example.org; option domain-name "internal.example.org"; option routers 10.5.5.1; option broadcast-address 10.5.5.31; default-lease-time 600; max-lease-time 7200; } # Hosts which require special configuration options can be listed in # host statements. 
If no address is specified, the address will be # allocated dynamically (if possible), but the host-specific information # will still come from the host declaration. host passacaglia { hardware ethernet 0:0:c0:5d:bd:95; filename "vmunix.passacaglia"; server-name "toccata.example.com"; } # Fixed IP addresses can also be specified for hosts. These addresses # should not also be listed as being available for dynamic assignment. # Hosts for which fixed IP addresses have been specified can boot using # BOOTP or DHCP. Hosts for which no fixed address is specified can only # be booted with DHCP, unless there is an address range on the subnet # to which a BOOTP client is connected which has the dynamic-bootp flag # set. host fantasia { hardware ethernet 08:00:07:26:c0:a5; fixed-address fantasia.example.com; } # You can declare a class of clients and then do address allocation # based on that. The example below shows a case where all clients # in a certain class get addresses on the 10.17.224/24 subnet, and all # other clients get addresses on the 10.0.29/24 subnet. 
class "foo" { match if substring (option vendor-class-identifier, 0, 4) = "SUNW"; } shared-network 224-29 { subnet 10.17.224.0 netmask 255.255.255.0 { option routers rtr-224.example.org; } subnet 10.0.29.0 netmask 255.255.255.0 { option routers rtr-29.example.org; } pool { allow members of "foo"; range 10.17.224.10 10.17.224.250; } pool { deny members of "foo"; range 10.0.29.10 10.0.29.230; } } ================================================ FILE: rd/etc/fstab ================================================ /dev/cdrom /media/cdrom iso9660 noauto,ro 0 0 /dev/usbdisk /media/usb vfat noauto,ro 0 0 ================================================ FILE: rd/etc/group ================================================ root:x:0:root bin:x:1:root,bin,daemon daemon:x:2:root,bin,daemon sys:x:3:root,bin,adm adm:x:4:root,adm,daemon tty:x:5: disk:x:6:root,adm lp:x:7:lp mem:x:8: kmem:x:9: wheel:x:10:root floppy:x:11:root mail:x:12:mail news:x:13:news uucp:x:14:uucp man:x:15:man cron:x:16:cron console:x:17: audio:x:18: cdrom:x:19: dialout:x:20:root ftp:x:21: sshd:x:22: input:x:23: at:x:25:at tape:x:26:root video:x:27:root netdev:x:28: readproc:x:30: squid:x:31:squid xfs:x:33:xfs kvm:x:34:kvm games:x:35: shadow:x:42: cdrw:x:80: usb:x:85: vpopmail:x:89: users:x:100:games ntp:x:123: nofiles:x:200: smmsp:x:209:smmsp locate:x:245: abuild:x:300: utmp:x:406: ping:x:999: nogroup:x:65533: nobody:x:65534: dhcp:x:101:dhcp ================================================ FILE: rd/etc/group- ================================================ root:x:0:root bin:x:1:root,bin,daemon daemon:x:2:root,bin,daemon sys:x:3:root,bin,adm adm:x:4:root,adm,daemon tty:x:5: disk:x:6:root,adm lp:x:7:lp mem:x:8: kmem:x:9: wheel:x:10:root floppy:x:11:root mail:x:12:mail news:x:13:news uucp:x:14:uucp man:x:15:man cron:x:16:cron console:x:17: audio:x:18: cdrom:x:19: dialout:x:20:root ftp:x:21: sshd:x:22: input:x:23: at:x:25:at tape:x:26:root video:x:27:root netdev:x:28: readproc:x:30: squid:x:31:squid xfs:x:33:xfs 
kvm:x:34:kvm games:x:35: shadow:x:42: cdrw:x:80: usb:x:85: vpopmail:x:89: users:x:100:games ntp:x:123: nofiles:x:200: smmsp:x:209:smmsp locate:x:245: abuild:x:300: utmp:x:406: ping:x:999: nogroup:x:65533: nobody:x:65534: dhcp:x:101: ================================================ FILE: rd/etc/hostname ================================================ localhost ================================================ FILE: rd/etc/hosts ================================================ 127.0.0.1 localhost localhost.localdomain ::1 localhost localhost.localdomain ================================================ FILE: rd/etc/inittab ================================================ # /etc/inittab ::sysinit:/sbin/openrc sysinit ::sysinit:/sbin/openrc boot ::wait:/sbin/openrc default # Set up a couple of getty's tty1::respawn:/sbin/getty 38400 tty1 tty2::respawn:/sbin/getty 38400 tty2 tty3::respawn:/sbin/getty 38400 tty3 tty4::respawn:/sbin/getty 38400 tty4 tty5::respawn:/sbin/getty 38400 tty5 tty6::respawn:/sbin/getty 38400 tty6 # Put a getty on the serial port #ttyS0::respawn:/sbin/getty -L ttyS0 115200 vt100 # Stuff to do for the 3-finger salute ::ctrlaltdel:/sbin/reboot # Stuff to do before rebooting ::shutdown:/sbin/openrc shutdown ================================================ FILE: rd/etc/inputrc ================================================ # /etc/inputrc - global inputrc for libreadline # See readline(3readline) and `info rluserman' for more information. # Be 8 bit clean. set input-meta on set output-meta on # To allow the use of 8bit-characters like the german umlauts, uncomment # the line below. However this makes the meta key not work as a meta key, # which is annoying to those which don't need to type in 8-bit characters. # set convert-meta off # try to enable the application keypad when it is called. Some systems # need this to enable the arrow keys. 
# set enable-keypad on # see /usr/share/doc/bash/inputrc.arrows for other codes of arrow keys # do not bell on tab-completion # set bell-style none # set bell-style visible # some defaults / modifications for the emacs mode $if mode=emacs # allow the use of the Home/End keys "\e[1~": beginning-of-line "\e[4~": end-of-line # allow the use of the Delete/Insert keys "\e[3~": delete-char "\e[2~": quoted-insert # mappings for "page up" and "page down" to step to the beginning/end # of the history # "\e[5~": beginning-of-history # "\e[6~": end-of-history # alternate mappings for "page up" and "page down" to search the history # "\e[5~": history-search-backward # "\e[6~": history-search-forward # mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving "\e[1;5C": forward-word "\e[1;5D": backward-word "\e[5C": forward-word "\e[5D": backward-word "\e\e[C": forward-word "\e\e[D": backward-word $if term=rxvt "\e[7~": beginning-of-line "\e[8~": end-of-line "\eOc": forward-word "\eOd": backward-word $endif # for non RH/Debian xterm, can't hurt for RH/Debian xterm # "\eOH": beginning-of-line # "\eOF": end-of-line # for freebsd console # "\e[H": beginning-of-line # "\e[F": end-of-line $endif ================================================ FILE: rd/etc/issue ================================================ Welcome to Alpine Linux 3.12 Kernel \r on an \m (\l) ================================================ FILE: rd/etc/logrotate.d/acpid ================================================ /var/log/acpid.log { missingok notifempty sharedscripts postrotate /etc/init.d/acpid --quiet --ifstarted restart || true endscript } ================================================ FILE: rd/etc/modprobe.d/aliases.conf ================================================ # Aliases to tell insmod/modprobe which modules to use # Uncomment the network protocols you don't want loaded: # alias net-pf-1 off # Unix # alias net-pf-2 off # IPv4 # alias net-pf-3 off # Amateur Radio AX.25 # alias net-pf-4 
off # IPX # alias net-pf-5 off # DDP / appletalk # alias net-pf-6 off # Amateur Radio NET/ROM # alias net-pf-9 off # X.25 # alias net-pf-10 off # IPv6 # alias net-pf-11 off # ROSE / Amateur Radio X.25 PLP # alias net-pf-19 off # Acorn Econet alias char-major-10-175 agpgart alias char-major-10-200 tun alias char-major-81 bttv alias char-major-108 ppp_generic alias /dev/ppp ppp_generic alias tty-ldisc-3 ppp_async alias tty-ldisc-14 ppp_synctty alias ppp-compress-21 bsd_comp alias ppp-compress-24 ppp_deflate alias ppp-compress-26 ppp_deflate # Crypto modules (see http://www.kerneli.org/) alias loop-xfer-gen-0 loop_gen alias loop-xfer-3 loop_fish2 alias loop-xfer-gen-10 loop_gen alias cipher-2 des alias cipher-3 fish2 alias cipher-4 blowfish alias cipher-6 idea alias cipher-7 serp6f alias cipher-8 mars6 alias cipher-11 rc62 alias cipher-15 dfc2 alias cipher-16 rijndael alias cipher-17 rc5 # Support for i2c and lm_sensors alias char-major-89 i2c-dev # xfrm alias xfrm-type-2-4 xfrm4_tunnel alias xfrm-type-2-50 esp4 alias xfrm-type-2-51 ah4 alias xfrm-type-2-108 ipcomp alias xfrm-type-10-41 xfrm6_tunnel alias xfrm-type-10-50 esp6 alias xfrm-type-10-51 ah6 alias xfrm-type-10-108 ipcomp6 alias sha1 sha1-generic # change to aes-i586 to boost performance alias aes aes-generic ================================================ FILE: rd/etc/modprobe.d/blacklist.conf ================================================ # # Listing a module here prevents the hotplug scripts from loading it. # Usually that'd be so that some other driver will bind it instead, # no matter which driver happens to get probed first. Sometimes user # mode tools can also control driver binding. # tulip ... de4x5, xircom_tulip_cb, dmfe (...) handle same devices blacklist de4x5 # At least 2.4.3 and later xircom_tulip doesn't have that conflict # xircom_tulip_cb blacklist dmfe #evbug is a debug tool and should be loaded explicitly blacklist evbug # Alternate 8139 driver. 
Some 8139 cards need this specific driver, # though... # blacklist 8139cp # Ethernet over IEEE1394 module. In too many cases this will load # when there's no eth1394 device present (just an IEEE1394 port) blacklist eth1394 # This module causes many Intel motherboards to crash and reboot. blacklist i8xx-tco # The kernel lists this as "experimental", but for now it's "broken" blacklist via-ircc # ALSA modules to support sound modems. These should be loaded manually # if needed. For most people they just break sound support... blacklist snd-atiixp-modem blacklist snd-intel8x0m blacklist snd-via82xx-modem # we don't want use the pc speaker blacklist snd-pcsp # Alternative module to Orinoco Wireless Cards. blacklist hostap blacklist hostap_cs # framebuffer drivers blacklist aty128fb blacklist atyfb blacklist radeonfb blacklist i810fb blacklist cirrusfb blacklist intelfb blacklist kyrofb blacklist i2c-matroxfb blacklist hgafb blacklist nvidiafb blacklist rivafb blacklist savagefb blacklist sstfb blacklist neofb blacklist tridentfb blacklist tdfxfb blacklist viafb blacklist virgefb blacklist vga16fb blacklist matroxfb_base blacklist vt8623fb # blacklist 1394 drivers blacklist ohci1394 blacklist video1394 blacklist dv1394 # blacklist mISDN dirver by default as we prefer dahdi drivers blacklist hfcmulti blacklist hfcpci blacklist hfcsusb # blacklist C7 cpu freq. 
use acpi-cpufreq instead blacklist e_powersaver blacklist microcode ================================================ FILE: rd/etc/modprobe.d/i386.conf ================================================ alias parport_lowlevel parport_pc alias char-major-10-144 nvram alias binfmt-0064 binfmt_aout alias char-major-10-135 rtc ================================================ FILE: rd/etc/modprobe.d/kms.conf ================================================ # enable modeset options radeon modeset=1 options i915 modeset=1 options nouveau modeset=1 ================================================ FILE: rd/etc/modules ================================================ af_packet ipv6 ================================================ FILE: rd/etc/motd ================================================ Welcome to Alpine! The Alpine Wiki contains a large amount of how-to guides and general information about administrating Alpine systems. See . You can setup the system with the command: setup-alpine You may change this message by editing /etc/motd. ================================================ FILE: rd/etc/network/if-up.d/dad ================================================ #!/bin/sh # Block ifup until DAD completion # Copyright (c) 2016-2018 Kaarle Ritvanen has_flag() { ip address show dev $IFACE | grep -q " $1 " } while has_flag tentative && ! 
has_flag dadfailed; do sleep 0.2 done ================================================ FILE: rd/etc/os-release ================================================ NAME="Alpine Linux" ID=alpine VERSION_ID=3.12.0 PRETTY_NAME="Alpine Linux v3.12" HOME_URL="https://alpinelinux.org/" BUG_REPORT_URL="https://bugs.alpinelinux.org/" ================================================ FILE: rd/etc/passwd ================================================ root:x:0:0:root:/root:/bin/ash bin:x:1:1:bin:/bin:/sbin/nologin daemon:x:2:2:daemon:/sbin:/sbin/nologin adm:x:3:4:adm:/var/adm:/sbin/nologin lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin sync:x:5:0:sync:/sbin:/bin/sync shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown halt:x:7:0:halt:/sbin:/sbin/halt mail:x:8:12:mail:/var/mail:/sbin/nologin news:x:9:13:news:/usr/lib/news:/sbin/nologin uucp:x:10:14:uucp:/var/spool/uucppublic:/sbin/nologin operator:x:11:0:operator:/root:/sbin/nologin man:x:13:15:man:/usr/man:/sbin/nologin postmaster:x:14:12:postmaster:/var/mail:/sbin/nologin cron:x:16:16:cron:/var/spool/cron:/sbin/nologin ftp:x:21:21::/var/lib/ftp:/sbin/nologin sshd:x:22:22:sshd:/dev/null:/sbin/nologin at:x:25:25:at:/var/spool/cron/atjobs:/sbin/nologin squid:x:31:31:Squid:/var/cache/squid:/sbin/nologin xfs:x:33:33:X Font Server:/etc/X11/fs:/sbin/nologin games:x:35:35:games:/usr/games:/sbin/nologin cyrus:x:85:12::/usr/cyrus:/sbin/nologin vpopmail:x:89:89::/var/vpopmail:/sbin/nologin ntp:x:123:123:NTP:/var/empty:/sbin/nologin smmsp:x:209:209:smmsp:/var/spool/mqueue:/sbin/nologin guest:x:405:100:guest:/dev/null:/sbin/nologin nobody:x:65534:65534:nobody:/:/sbin/nologin dhcp:x:100:101:dhcp:/var/lib/dhcp:/sbin/nologin ================================================ FILE: rd/etc/passwd- ================================================ root:x:0:0:root:/root:/bin/ash bin:x:1:1:bin:/bin:/sbin/nologin daemon:x:2:2:daemon:/sbin:/sbin/nologin adm:x:3:4:adm:/var/adm:/sbin/nologin lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin sync:x:5:0:sync:/sbin:/bin/sync 
shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown halt:x:7:0:halt:/sbin:/sbin/halt mail:x:8:12:mail:/var/mail:/sbin/nologin news:x:9:13:news:/usr/lib/news:/sbin/nologin uucp:x:10:14:uucp:/var/spool/uucppublic:/sbin/nologin operator:x:11:0:operator:/root:/sbin/nologin man:x:13:15:man:/usr/man:/sbin/nologin postmaster:x:14:12:postmaster:/var/mail:/sbin/nologin cron:x:16:16:cron:/var/spool/cron:/sbin/nologin ftp:x:21:21::/var/lib/ftp:/sbin/nologin sshd:x:22:22:sshd:/dev/null:/sbin/nologin at:x:25:25:at:/var/spool/cron/atjobs:/sbin/nologin squid:x:31:31:Squid:/var/cache/squid:/sbin/nologin xfs:x:33:33:X Font Server:/etc/X11/fs:/sbin/nologin games:x:35:35:games:/usr/games:/sbin/nologin cyrus:x:85:12::/usr/cyrus:/sbin/nologin vpopmail:x:89:89::/var/vpopmail:/sbin/nologin ntp:x:123:123:NTP:/var/empty:/sbin/nologin smmsp:x:209:209:smmsp:/var/spool/mqueue:/sbin/nologin guest:x:405:100:guest:/dev/null:/sbin/nologin nobody:x:65534:65534:nobody:/:/sbin/nologin dhcp:x:100:101:dhcp:/var/lib/dhcp:/sbin/nologin ================================================ FILE: rd/etc/profile ================================================ export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin export PAGER=less export PS1='\h:\w\$ ' umask 022 for script in /etc/profile.d/*.sh ; do if [ -r $script ] ; then . $script fi done ================================================ FILE: rd/etc/profile.d/color_prompt ================================================ # Setup a red prompt for root and a green one for users. 
# rename this file to color_prompt.sh to actually enable it NORMAL="\[\e[0m\]" RED="\[\e[1;31m\]" GREEN="\[\e[1;32m\]" if [ "$USER" = root ]; then PS1="$RED\h [$NORMAL\w$RED]# $NORMAL" else PS1="$GREEN\h [$NORMAL\w$GREEN]\$ $NORMAL" fi ================================================ FILE: rd/etc/profile.d/locale ================================================ CHARSET=UTF-8 LANG=C.UTF-8 LC_COLLATE=C ================================================ FILE: rd/etc/protocols ================================================ # /etc/protocols: # $Id: protocols,v 1.1 2004/02/04 03:32:40 pebenito Exp $ # $Header: /home/cvsroot/gentoo-projects/embedded/baselayout-lite/protocols,v 1.1 2004/02/04 03:32:40 pebenito Exp $ # # Internet (IP) protocols # # from: @(#)protocols 5.1 (Berkeley) 4/17/89 # # Updated for NetBSD based on RFC 1340, Assigned Numbers (July 1992). ip 0 IP # internet protocol, pseudo protocol number icmp 1 ICMP # internet control message protocol igmp 2 IGMP # Internet Group Management ggp 3 GGP # gateway-gateway protocol ipencap 4 IP-ENCAP # IP encapsulated in IP (officially ``IP'') st 5 ST # ST datagram mode tcp 6 TCP # transmission control protocol egp 8 EGP # exterior gateway protocol pup 12 PUP # PARC universal packet protocol udp 17 UDP # user datagram protocol hmp 20 HMP # host monitoring protocol xns-idp 22 XNS-IDP # Xerox NS IDP rdp 27 RDP # "reliable datagram" protocol iso-tp4 29 ISO-TP4 # ISO Transport Protocol class 4 xtp 36 XTP # Xpress Tranfer Protocol ddp 37 DDP # Datagram Delivery Protocol idpr-cmtp 38 IDPR-CMTP # IDPR Control Message Transport ipv6 41 IPv6 # IPv6 ipv6-route 43 IPv6-Route # Routing Header for IPv6 ipv6-frag 44 IPv6-Frag # Fragment Header for IPv6 idrp 45 IDRP # Inter-Domain Routing Protocol rsvp 46 RSVP # Reservation Protocol gre 47 GRE # General Routing Encapsulation esp 50 ESP # Encap Security Payload for IPv6 ah 51 AH # Authentication Header for IPv6 skip 57 SKIP # SKIP ipv6-icmp 58 IPv6-ICMP # ICMP for IPv6 ipv6-nonxt 59 
IPv6-NoNxt # No Next Header for IPv6 ipv6-opts 60 IPv6-Opts # Destination Options for IPv6 rspf 73 RSPF # Radio Shortest Path First. vmtp 81 VMTP # Versatile Message Transport ospf 89 OSPFIGP # Open Shortest Path First IGP ipip 94 IPIP # IP-within-IP Encapsulation Protocol encap 98 ENCAP # Yet Another IP encapsulation pim 103 PIM # Protocol Independent Multicast ================================================ FILE: rd/etc/resolv.conf ================================================ nameserver 1.1.1.1 ================================================ FILE: rd/etc/securetty ================================================ console tty1 tty2 tty3 tty4 tty5 tty6 tty7 tty8 tty9 tty10 tty11 ================================================ FILE: rd/etc/services ================================================ # Network services, Internet style # # Note that it is presently the policy of IANA to assign a single well-known # port number for both TCP and UDP; hence, officially ports have two entries # even if the protocol doesn't support UDP operations. # # Updated from https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml . # # New ports will be added on request if they have been officially assigned # by IANA and used in the real-world or are needed by a debian package. # If you need a huge list of used numbers please install the nmap package. 
tcpmux 1/tcp # TCP port service multiplexer echo 7/tcp echo 7/udp discard 9/tcp sink null discard 9/udp sink null systat 11/tcp users daytime 13/tcp daytime 13/udp netstat 15/tcp qotd 17/tcp quote chargen 19/tcp ttytst source chargen 19/udp ttytst source ftp-data 20/tcp ftp 21/tcp fsp 21/udp fspd ssh 22/tcp # SSH Remote Login Protocol telnet 23/tcp smtp 25/tcp mail time 37/tcp timserver time 37/udp timserver whois 43/tcp nicname tacacs 49/tcp # Login Host Protocol (TACACS) tacacs 49/udp domain 53/tcp # Domain Name Server domain 53/udp bootps 67/udp bootpc 68/udp tftp 69/udp gopher 70/tcp # Internet Gopher finger 79/tcp http 80/tcp www # WorldWideWeb HTTP kerberos 88/tcp kerberos5 krb5 kerberos-sec # Kerberos v5 kerberos 88/udp kerberos5 krb5 kerberos-sec # Kerberos v5 iso-tsap 102/tcp tsap # part of ISODE acr-nema 104/tcp dicom # Digital Imag. & Comm. 300 pop3 110/tcp pop-3 # POP version 3 sunrpc 111/tcp portmapper # RPC 4.0 portmapper sunrpc 111/udp portmapper auth 113/tcp authentication tap ident nntp 119/tcp readnews untp # USENET News Transfer Protocol ntp 123/udp # Network Time Protocol epmap 135/tcp loc-srv # DCE endpoint resolution netbios-ns 137/tcp # NETBIOS Name Service netbios-ns 137/udp netbios-dgm 138/tcp # NETBIOS Datagram Service netbios-dgm 138/udp netbios-ssn 139/tcp # NETBIOS session service netbios-ssn 139/udp imap2 143/tcp imap # Interim Mail Access P 2 and 4 snmp 161/tcp # Simple Net Mgmt Protocol snmp 161/udp snmp-trap 162/tcp snmptrap # Traps for SNMP snmp-trap 162/udp snmptrap cmip-man 163/tcp # ISO mgmt over IP (CMOT) cmip-man 163/udp cmip-agent 164/tcp cmip-agent 164/udp mailq 174/tcp # Mailer transport queue for Zmailer xdmcp 177/udp # X Display Manager Control Protocol bgp 179/tcp # Border Gateway Protocol smux 199/tcp # SNMP Unix Multiplexer qmtp 209/tcp # Quick Mail Transfer Protocol z3950 210/tcp wais # NISO Z39.50 database ipx 213/udp # IPX [RFC1234] ptp-event 319/udp ptp-general 320/udp pawserv 345/tcp # Perf Analysis Workbench 
zserv 346/tcp # Zebra server rpc2portmap 369/tcp rpc2portmap 369/udp # Coda portmapper codaauth2 370/tcp codaauth2 370/udp # Coda authentication server clearcase 371/udp Clearcase ldap 389/tcp # Lightweight Directory Access Protocol ldap 389/udp svrloc 427/tcp # Server Location svrloc 427/udp https 443/tcp # http protocol over TLS/SSL snpp 444/tcp # Simple Network Paging Protocol microsoft-ds 445/tcp # Microsoft Naked CIFS microsoft-ds 445/udp kpasswd 464/tcp kpasswd 464/udp submissions 465/tcp ssmtp smtps urd # Submission over TLS [RFC8314] saft 487/tcp # Simple Asynchronous File Transfer isakmp 500/udp # IPSEC key management rtsp 554/tcp # Real Time Stream Control Protocol rtsp 554/udp nqs 607/tcp # Network Queuing system asf-rmcp 623/udp # ASF Remote Management and Control Protocol qmqp 628/tcp ipp 631/tcp # Internet Printing Protocol # # UNIX specific services # exec 512/tcp biff 512/udp comsat login 513/tcp who 513/udp whod shell 514/tcp cmd syslog # no passwords used syslog 514/udp printer 515/tcp spooler # line printer spooler talk 517/udp ntalk 518/udp route 520/udp router routed # RIP gdomap 538/tcp # GNUstep distributed objects gdomap 538/udp uucp 540/tcp uucpd # uucp daemon klogin 543/tcp # Kerberized `rlogin' (v5) kshell 544/tcp krcmd # Kerberized `rsh' (v5) dhcpv6-client 546/udp dhcpv6-server 547/udp afpovertcp 548/tcp # AFP over TCP nntps 563/tcp snntp # NNTP over SSL submission 587/tcp # Submission [RFC4409] ldaps 636/tcp # LDAP over SSL ldaps 636/udp tinc 655/tcp # tinc control port tinc 655/udp silc 706/tcp kerberos-adm 749/tcp # Kerberos `kadmin' (v5) # domain-s 853/tcp # DNS over TLS [RFC7858] domain-s 853/udp # DNS over DTLS [RFC8094] rsync 873/tcp ftps-data 989/tcp # FTP over SSL (data) ftps 990/tcp telnets 992/tcp # Telnet over SSL imaps 993/tcp # IMAP over SSL pop3s 995/tcp # POP-3 over SSL # # From ``Assigned Numbers'': # #> The Registered Ports are not controlled by the IANA and on most systems #> can be used by ordinary user processes or 
programs executed by ordinary #> users. # #> Ports are used in the TCP [45,106] to name the ends of logical #> connections which carry long term conversations. For the purpose of #> providing services to unknown callers, a service contact port is #> defined. This list specifies the port used by the server process as its #> contact port. While the IANA can not control uses of these ports it #> does register or list uses of these ports as a convienence to the #> community. # socks 1080/tcp # socks proxy server proofd 1093/tcp rootd 1094/tcp openvpn 1194/tcp openvpn 1194/udp rmiregistry 1099/tcp # Java RMI Registry lotusnote 1352/tcp lotusnotes # Lotus Note ms-sql-s 1433/tcp # Microsoft SQL Server ms-sql-s 1433/udp ms-sql-m 1434/tcp # Microsoft SQL Monitor ms-sql-m 1434/udp ingreslock 1524/tcp datametrics 1645/tcp old-radius datametrics 1645/udp old-radius sa-msg-port 1646/tcp old-radacct sa-msg-port 1646/udp old-radacct kermit 1649/tcp groupwise 1677/tcp l2f 1701/udp l2tp radius 1812/tcp radius 1812/udp radius-acct 1813/tcp radacct # Radius Accounting radius-acct 1813/udp radacct cisco-sccp 2000/tcp # Cisco SCCP nfs 2049/tcp # Network File System nfs 2049/udp # Network File System gnunet 2086/tcp gnunet 2086/udp rtcm-sc104 2101/tcp # RTCM SC-104 IANA 1/29/99 rtcm-sc104 2101/udp gsigatekeeper 2119/tcp gris 2135/tcp # Grid Resource Information Server cvspserver 2401/tcp # CVS client/server operations venus 2430/tcp # codacon port venus 2430/udp # Venus callback/wbc interface venus-se 2431/tcp # tcp side effects venus-se 2431/udp # udp sftp side effect codasrv 2432/tcp # not used codasrv 2432/udp # server port codasrv-se 2433/tcp # tcp side effects codasrv-se 2433/udp # udp sftp side effect mon 2583/tcp # MON traps mon 2583/udp dict 2628/tcp # Dictionary server f5-globalsite 2792/tcp gsiftp 2811/tcp gpsd 2947/tcp gds-db 3050/tcp gds_db # InterBase server icpv2 3130/tcp icp # Internet Cache Protocol icpv2 3130/udp icp isns 3205/tcp # iSNS Server Port isns 3205/udp # iSNS 
Server Port iscsi-target 3260/tcp mysql 3306/tcp ms-wbt-server 3389/tcp nut 3493/tcp # Network UPS Tools nut 3493/udp distcc 3632/tcp # distributed compiler distcc 3632/udp daap 3689/tcp # Digital Audio Access Protocol daap 3689/udp svn 3690/tcp subversion # Subversion protocol svn 3690/udp subversion suucp 4031/tcp # UUCP over SSL suucp 4031/udp sysrqd 4094/tcp # sysrq daemon sysrqd 4094/udp sieve 4190/tcp # ManageSieve Protocol epmd 4369/tcp # Erlang Port Mapper Daemon epmd 4369/udp remctl 4373/tcp # Remote Authenticated Command Service remctl 4373/udp f5-iquery 4353/tcp # F5 iQuery f5-iquery 4353/udp ipsec-nat-t 4500/udp # IPsec NAT-Traversal [RFC3947] iax 4569/tcp # Inter-Asterisk eXchange iax 4569/udp mtn 4691/tcp # monotone Netsync Protocol mtn 4691/udp radmin-port 4899/tcp # RAdmin Port radmin-port 4899/udp sip 5060/tcp # Session Initiation Protocol sip 5060/udp sip-tls 5061/tcp sip-tls 5061/udp xmpp-client 5222/tcp jabber-client # Jabber Client Connection xmpp-server 5269/tcp jabber-server # Jabber Server Connection cfengine 5308/tcp mdns 5353/udp # Multicast DNS postgresql 5432/tcp postgres # PostgreSQL Database freeciv 5556/tcp rptp # Freeciv gameplay amqps 5671/tcp # AMQP protocol over TLS/SSL amqp 5672/tcp amqp 5672/udp amqp 5672/sctp x11 6000/tcp x11-0 # X Window System x11-1 6001/tcp x11-2 6002/tcp x11-3 6003/tcp x11-4 6004/tcp x11-5 6005/tcp x11-6 6006/tcp x11-7 6007/tcp gnutella-svc 6346/tcp # gnutella gnutella-svc 6346/udp gnutella-rtr 6347/tcp # gnutella gnutella-rtr 6347/udp sge-qmaster 6444/tcp sge_qmaster # Grid Engine Qmaster Service sge-execd 6445/tcp sge_execd # Grid Engine Execution Service mysql-proxy 6446/tcp # MySQL Proxy babel 6696/udp # Babel Routing Protocol ircs-u 6697/tcp # Internet Relay Chat via TLS/SSL afs3-fileserver 7000/tcp bbs # file server itself afs3-fileserver 7000/udp bbs afs3-callback 7001/tcp # callbacks to cache managers afs3-callback 7001/udp afs3-prserver 7002/tcp # users & groups database afs3-prserver 7002/udp 
afs3-vlserver 7003/tcp # volume location database afs3-vlserver 7003/udp afs3-kaserver 7004/tcp # AFS/Kerberos authentication afs3-kaserver 7004/udp afs3-volser 7005/tcp # volume managment server afs3-volser 7005/udp afs3-errors 7006/tcp # error interpretation service afs3-errors 7006/udp afs3-bos 7007/tcp # basic overseer process afs3-bos 7007/udp afs3-update 7008/tcp # server-to-server updater afs3-update 7008/udp afs3-rmtsys 7009/tcp # remote cache manager service afs3-rmtsys 7009/udp font-service 7100/tcp xfs # X Font Service http-alt 8080/tcp webcache # WWW caching service puppet 8140/tcp # The Puppet master service bacula-dir 9101/tcp # Bacula Director bacula-fd 9102/tcp # Bacula File Daemon bacula-sd 9103/tcp # Bacula Storage Daemon xmms2 9667/tcp # Cross-platform Music Multiplexing System nbd 10809/tcp # Linux Network Block Device zabbix-agent 10050/tcp # Zabbix Agent zabbix-trapper 10051/tcp # Zabbix Trapper amanda 10080/tcp # amanda backup services dicom 11112/tcp hkp 11371/tcp # OpenPGP HTTP Keyserver db-lsp 17500/tcp # Dropbox LanSync Protocol dcap 22125/tcp # dCache Access Protocol gsidcap 22128/tcp # GSI dCache Access Protocol wnn6 22273/tcp # wnn6 # # Datagram Delivery Protocol services # rtmp 1/ddp # Routing Table Maintenance Protocol nbp 2/ddp # Name Binding Protocol echo 4/ddp # AppleTalk Echo Protocol zip 6/ddp # Zone Information Protocol #========================================================================= # The remaining port numbers are not as allocated by IANA. 
#========================================================================= # Kerberos (Project Athena/MIT) services kerberos4 750/udp kerberos-iv kdc # Kerberos (server) kerberos4 750/tcp kerberos-iv kdc kerberos-master 751/udp kerberos_master # Kerberos authentication kerberos-master 751/tcp passwd-server 752/udp passwd_server # Kerberos passwd server krb-prop 754/tcp krb_prop krb5_prop hprop # Kerberos slave propagation zephyr-srv 2102/udp # Zephyr server zephyr-clt 2103/udp # Zephyr serv-hm connection zephyr-hm 2104/udp # Zephyr hostmanager iprop 2121/tcp # incremental propagation supfilesrv 871/tcp # Software Upgrade Protocol server supfiledbg 1127/tcp # Software Upgrade Protocol debugging # # Services added for the Debian GNU/Linux distribution # poppassd 106/tcp # Eudora poppassd 106/udp moira-db 775/tcp moira_db # Moira database moira-update 777/tcp moira_update # Moira update protocol moira-ureg 779/udp moira_ureg # Moira user registration spamd 783/tcp # spamassassin daemon skkserv 1178/tcp # skk jisho server port predict 1210/udp # predict -- satellite tracking rmtcfg 1236/tcp # Gracilis Packeten remote config server xtel 1313/tcp # french minitel xtelw 1314/tcp # french minitel support 1529/tcp # GNATS cfinger 2003/tcp # GNU Finger frox 2121/tcp # frox: caching ftp proxy zebrasrv 2600/tcp # zebra service zebra 2601/tcp # zebra vty ripd 2602/tcp # ripd vty (zebra) ripngd 2603/tcp # ripngd vty (zebra) ospfd 2604/tcp # ospfd vty (zebra) bgpd 2605/tcp # bgpd vty (zebra) ospf6d 2606/tcp # ospf6d vty (zebra) ospfapi 2607/tcp # OSPF-API isisd 2608/tcp # ISISd vty (zebra) afbackup 2988/tcp # Afbackup system afbackup 2988/udp afmbackup 2989/tcp # Afmbackup system afmbackup 2989/udp fax 4557/tcp # FAX transmission service (old) hylafax 4559/tcp # HylaFAX client-server protocol (new) distmp3 4600/tcp # distmp3host daemon munin 4949/tcp lrrd # Munin enbd-cstatd 5051/tcp # ENBD client statd enbd-sstatd 5052/tcp # ENBD server statd pcrd 5151/tcp # PCR-1000 Daemon 
noclog 5354/tcp # noclogd with TCP (nocol) noclog 5354/udp # noclogd with UDP (nocol) hostmon 5355/tcp # hostmon uses TCP (nocol) hostmon 5355/udp # hostmon uses UDP (nocol) rplay 5555/udp # RPlay audio service nrpe 5666/tcp # Nagios Remote Plugin Executor nsca 5667/tcp # Nagios Agent - NSCA mrtd 5674/tcp # MRT Routing Daemon bgpsim 5675/tcp # MRT Routing Simulator canna 5680/tcp # cannaserver syslog-tls 6514/tcp # Syslog over TLS [RFC5425] sane-port 6566/tcp sane saned # SANE network scanner daemon ircd 6667/tcp # Internet Relay Chat zope-ftp 8021/tcp # zope management by ftp tproxy 8081/tcp # Transparent Proxy omniorb 8088/tcp # OmniORB omniorb 8088/udp clc-build-daemon 8990/tcp # Common lisp build daemon xinetd 9098/tcp mandelspawn 9359/udp mandelbrot # network mandelbrot git 9418/tcp # Git Version Control System zope 9673/tcp # zope server webmin 10000/tcp kamanda 10081/tcp # amanda backup services (Kerberos) amandaidx 10082/tcp # amanda backup services amidxtape 10083/tcp # amanda backup services smsqp 11201/tcp # Alamin SMS gateway smsqp 11201/udp xpilot 15345/tcp # XPilot Contact Port xpilot 15345/udp sgi-cmsd 17001/udp # Cluster membership services daemon sgi-crsd 17002/udp sgi-gcd 17003/udp # SGI Group membership daemon sgi-cad 17004/tcp # Cluster Admin daemon isdnlog 20011/tcp # isdn logging system isdnlog 20011/udp vboxd 20012/tcp # voice box system vboxd 20012/udp binkp 24554/tcp # binkp fidonet protocol asp 27374/tcp # Address Search Protocol asp 27374/udp csync2 30865/tcp # cluster synchronization tool dircproxy 57000/tcp # Detachable IRC Proxy tfido 60177/tcp # fidonet EMSI over telnet fido 60179/tcp # fidonet EMSI over TCP # Local services ================================================ FILE: rd/etc/shadow ================================================ root:$6$gCnCIJyGxcPvde2g$pjwJC3FkxXHFXQMMFpd1q43THVEi7IdEh1/rwaJ9LzepagBndmdRFL.UOYHYL9k88rze.f5APeuQU82haHNOV1:18600:0::::: bin:!::0::::: daemon:!::0::::: adm:!::0::::: lp:!::0::::: sync:!::0::::: 
shutdown:!::0::::: halt:!::0::::: mail:!::0::::: news:!::0::::: uucp:!::0::::: operator:!::0::::: man:!::0::::: postmaster:!::0::::: cron:!::0::::: ftp:!::0::::: sshd:!::0::::: at:!::0::::: squid:!::0::::: xfs:!::0::::: games:!::0::::: cyrus:!::0::::: vpopmail:!::0::::: ntp:!::0::::: smmsp:!::0::::: guest:!::0::::: nobody:!::0::::: dhcp:!:18600:0:99999:7::: ================================================ FILE: rd/etc/shadow- ================================================ root:!::0::::: bin:!::0::::: daemon:!::0::::: adm:!::0::::: lp:!::0::::: sync:!::0::::: shutdown:!::0::::: halt:!::0::::: mail:!::0::::: news:!::0::::: uucp:!::0::::: operator:!::0::::: man:!::0::::: postmaster:!::0::::: cron:!::0::::: ftp:!::0::::: sshd:!::0::::: at:!::0::::: squid:!::0::::: xfs:!::0::::: games:!::0::::: cyrus:!::0::::: vpopmail:!::0::::: ntp:!::0::::: smmsp:!::0::::: guest:!::0::::: nobody:!::0::::: dhcp:!:18600:0:99999:7::: ================================================ FILE: rd/etc/shells ================================================ # valid login shells /bin/sh /bin/ash /bin/bash ================================================ FILE: rd/etc/sysctl.conf ================================================ # content of this file will override /etc/sysctl.d/* ================================================ FILE: rd/etc/udhcpd.conf ================================================ # Sample udhcpd configuration file (/etc/udhcpd.conf) # Values shown are defaults # The start and end of the IP lease block start 192.168.0.20 end 192.168.0.254 # The interface that udhcpd will use interface eth0 # The maximum number of leases (includes addresses reserved # by OFFER's, DECLINE's, and ARP conflicts). Will be corrected # if it's bigger than IP lease block, but it ok to make it # smaller than lease block. 
#max_leases 254 # The amount of time that an IP will be reserved (leased to nobody) # if a DHCP decline message is received (seconds) #decline_time 3600 # The amount of time that an IP will be reserved # if an ARP conflict occurs (seconds) #conflict_time 3600 # How long an offered address is reserved (seconds) #offer_time 60 # If client asks for lease below this value, it will be rounded up # to this value (seconds) #min_lease 60 # The location of the pid file #pidfile /var/run/udhcpd.pid # The location of the leases file #lease_file /var/lib/misc/udhcpd.leases # The time period at which udhcpd will write out leases file. # If this is 0, udhcpd will never automatically write leases file. # Specified in seconds. #auto_time 7200 # Every time udhcpd writes a leases file, the below script will be called #notify_file # default: no script #notify_file dumpleases # useful for debugging # The following are BOOTP specific options # next server to use in bootstrap #siaddr 192.168.0.22 # default: 0.0.0.0 (none) # tftp server name #sname zorak # default: none # tftp file to download (e.g. kernel image) #boot_file /var/nfs_root # default: none # NOTE: "boot_file FILE" and "opt bootfile FILE" are conceptually the same, # but "boot_file" goes into BOOTP-defined fixed-size field in the packet, # whereas "opt bootfile" goes into DHCP option 0x43. # Same for "sname HOST" and "opt tftp HOST". # Static leases map #static_lease 00:60:08:11:CE:4E 192.168.0.54 #static_lease 00:60:08:11:CE:3E 192.168.0.44 optional_hostname # The remainder of options are DHCP options and can be specified with the # keyword 'opt' or 'option'. If an option can take multiple items, such # as the dns option, they can be listed on the same line, or multiple # lines. 
# Examples: opt dns 192.168.10.2 192.168.10.10 option subnet 255.255.255.0 opt router 192.168.10.2 opt wins 192.168.10.10 option dns 129.219.13.81 # appended to above DNS servers for a total of 3 option domain local option lease 864000 # default: 10 days option msstaticroutes 10.0.0.0/8 10.127.0.1 # single static route option staticroutes 10.0.0.0/8 10.127.0.1, 10.11.12.0/24 10.11.12.1 # Arbitrary option in hex or string form: option 0x08 01020304 # option 8: "cookie server IP addr: 1.2.3.4" option 14 "dumpfile" # Currently supported options [hex option value] (for more info, see options.c): #opt lease NUM # [0x33] #opt subnet IP # [0x01] #opt broadcast IP # [0x1c] #opt router IP_LIST # [0x03] #opt ipttl NUM # [0x17] #opt mtu NUM # [0x1a] #opt hostname STRING # [0x0c] client's hostname #opt domain STRING # [0x0f] client's domain suffix #opt search STRING_LIST # [0x77] search domains #opt nisdomain STRING # [0x28] #opt timezone NUM # [0x02] (localtime - UTC_time) in seconds. signed #opt tftp STRING # [0x42] tftp server name #opt bootfile STRING # [0x43] tftp file to download (e.g. 
kernel image) #opt bootsize NUM # [0x0d] size of that file #opt rootpath STRING # [0x11] (NFS) path to mount as root fs #opt wpad STRING # [0xfc] Web Proxy Auto Discovery Protocol #opt serverid IP # [0x36] default: server's IP #opt message STRING # [0x38] error message (udhcpd sends it on success too) #opt vlanid NUM # [0x84] 802.1P VLAN ID #opt vlanpriority NUM # [0x85] 802.1Q VLAN priority # RFC 5071: PXELINUX Options #opt 0xd0 F100747E # [0xd0] magic #opt pxeconffile STRING # [0xd1] #opt pxepathprefix STRING # [0xd2] #opt reboottime NUM # [0xd3] bootstrap timeout # Options specifying server(s) #opt dns IP_LIST # [0x06] #opt wins IP_LIST # [0x2c] #opt nissrv IP_LIST # [0x29] #opt ntpsrv IP_LIST # [0x2a] #opt lprsrv IP_LIST # [0x09] #opt swapsrv IP # [0x10] # Options specifying routes #opt routes IP_PAIR_LIST # [0x21] #opt staticroutes STATIC_ROUTES # [0x79] RFC 3442 classless static route option #opt msstaticroutes STATIC_ROUTES # [0xf9] same, using MS option number # Obsolete options, no longer supported #opt logsrv IP_LIST # [0x07] 704/UDP log server (not syslog!) #opt namesrv IP_LIST # [0x05] IEN 116 name server, obsolete (August 1979!!!) #opt cookiesrv IP_LIST # [0x08] RFC 865 "quote of the day" server, rarely (never?) used #opt timesrv IP_LIST # [0x04] RFC 868 time server, rarely (never?) used # TODO: in development #opt userclass STRING # [0x4d] RFC 3004. set of LASCII strings. "I am a printer" etc #opt sipsrv STRING LIST # [0x78] RFC 3361. flag byte, then: 0: domain names, 1: IP addrs #opt ip6rd .... # [0xd4] IPv6 rapid deployment ================================================ FILE: rd/lib/firmware/.gitignore ================================================ * !.gitignore ================================================ FILE: rd/root/.config/htop/htoprc ================================================ # Beware! This file is rewritten by htop when settings are changed in the interface. # The parser is also very primitive, and not human-friendly. 
fields=0 18 39 2 46 49 1 sort_key=46 sort_direction=1 hide_threads=0 hide_kernel_threads=0 hide_userland_threads=0 shadow_other_users=0 show_thread_names=0 show_program_path=1 highlight_base_name=0 highlight_megabytes=1 highlight_threads=1 tree_view=0 header_margin=1 detailed_cpu_time=0 cpu_count_from_zero=0 update_process_names=0 account_guest_in_cpu_meter=0 color_scheme=0 delay=15 left_meters=LeftCPUs2 Memory Swap left_meter_modes=1 1 1 right_meters=RightCPUs2 Tasks LoadAverage Uptime right_meter_modes=1 2 2 2 ================================================ FILE: rd/sbin/ldconfig ================================================ #!/bin/sh scan_dirs() { scanelf -qS "$@" | while read SONAME FILE; do TARGET="${FILE##*/}" LINK="${FILE%/*}/$SONAME" case "$FILE" in /lib/*|/usr/lib/*|/usr/local/lib/*) ;; *) [ -h "$LINK" -o ! -e "$LINK" ] && ln -sf "$TARGET" "$LINK" esac done return 0 } # eat ldconfig options while getopts "nNvXvf:C:r:" opt; do : done shift $(( $OPTIND - 1 )) [ $# -gt 0 ] && scan_dirs "$@" ================================================ FILE: rd/usr/bin/2to3-3.8 ================================================ #!/usr/bin/python3.8 import sys from lib2to3.main import main sys.exit(main("lib2to3.fixes")) ================================================ FILE: rd/usr/bin/ldd ================================================ #!/bin/sh exec /lib/ld-musl-aarch64.so.1 --list "$@" ================================================ FILE: rd/usr/bin/pydoc3.8 ================================================ #!/usr/bin/python3.8 import pydoc if __name__ == '__main__': pydoc.cli() ================================================ FILE: rd/usr/lib/python3.8/LICENSE.txt ================================================ A. HISTORY OF THE SOFTWARE ========================== Python was created in the early 1990s by Guido van Rossum at Stichting Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands as a successor of a language called ABC. 
Guido remains Python's principal author, although it includes many contributions from others. In 1995, Guido continued his work on Python at the Corporation for National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) in Reston, Virginia where he released several versions of the software. In May 2000, Guido and the Python core development team moved to BeOpen.com to form the BeOpen PythonLabs team. In October of the same year, the PythonLabs team moved to Digital Creations, which became Zope Corporation. In 2001, the Python Software Foundation (PSF, see https://www.python.org/psf/) was formed, a non-profit organization created specifically to own Python-related Intellectual Property. Zope Corporation was a sponsoring member of the PSF. All Python releases are Open Source (see http://www.opensource.org for the Open Source Definition). Historically, most, but not all, Python releases have also been GPL-compatible; the table below summarizes the various releases. Release Derived Year Owner GPL- from compatible? (1) 0.9.0 thru 1.2 1991-1995 CWI yes 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes 1.6 1.5.2 2000 CNRI no 2.0 1.6 2000 BeOpen.com no 1.6.1 1.6 2001 CNRI yes (2) 2.1 2.0+1.6.1 2001 PSF no 2.0.1 2.0+1.6.1 2001 PSF yes 2.1.1 2.1+2.0.1 2001 PSF yes 2.1.2 2.1.1 2002 PSF yes 2.1.3 2.1.2 2002 PSF yes 2.2 and above 2.1.1 2001-now PSF yes Footnotes: (1) GPL-compatible doesn't mean that we're distributing Python under the GPL. All Python licenses, unlike the GPL, let you distribute a modified version without making your changes open source. The GPL-compatible licenses make it possible to combine Python with other software that is released under the GPL; the others don't. (2) According to Richard Stallman, 1.6.1 is not GPL-compatible, because its license has a choice of law clause. According to CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 is "not incompatible" with the GPL. 
Thanks to the many outside volunteers who have worked under Guido's direction to make these releases possible. B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON =============================================================== PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -------------------------------------------- 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 ------------------------------------------- BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the Individual or Organization ("Licensee") accessing and otherwise using this software in source or binary form and its associated documentation ("the Software"). 2. Subject to the terms and conditions of this BeOpen Python License Agreement, BeOpen hereby grants Licensee a non-exclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use the Software alone or in any derivative version, provided, however, that the BeOpen Python License is retained in the Software, alone or in any derivative version prepared by Licensee. 3. BeOpen is making the Software available to Licensee on an "AS IS" basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 5. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 6. This License Agreement shall be governed by and interpreted in all respects by the law of the State of California, excluding conflict of law provisions. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between BeOpen and Licensee. This License Agreement does not grant permission to use BeOpen trademarks or trade names in a trademark sense to endorse or promote products or services of Licensee, or any third party. As an exception, the "BeOpen Python" logos available at http://www.pythonlabs.com/logos.html may be used according to the permissions granted on that web page. 7. By copying, installing or otherwise using the software, Licensee agrees to be bound by the terms and conditions of this License Agreement. CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 --------------------------------------- 1. This LICENSE AGREEMENT is between the Corporation for National Research Initiatives, having an office at 1895 Preston White Drive, Reston, VA 20191 ("CNRI"), and the Individual or Organization ("Licensee") accessing and otherwise using Python 1.6.1 software in source or binary form and its associated documentation. 2. 
Subject to the terms and conditions of this License Agreement, CNRI hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python 1.6.1 alone or in any derivative version, provided, however, that CNRI's License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee. Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): "Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement. This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013. This Agreement may also be obtained from a proxy server on the Internet using the following URL: http://hdl.handle.net/1895.22/1013". 3. In the event Licensee prepares a derivative work that is based on or incorporates Python 1.6.1 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python 1.6.1. 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. This License Agreement shall be governed by the federal intellectual property law of the United States, including without limitation the federal copyright law, and, to the extent such U.S. federal law does not apply, by the law of the Commonwealth of Virginia, excluding Virginia's conflict of law provisions. Notwithstanding the foregoing, with regard to derivative works based on Python 1.6.1 that incorporate non-separable material that was previously distributed under the GNU General Public License (GPL), the law of the Commonwealth of Virginia shall govern this License Agreement only as to issues arising under or with respect to Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between CNRI and Licensee. This License Agreement does not grant permission to use CNRI trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By clicking on the "ACCEPT" button where indicated, or by copying, installing or otherwise using Python 1.6.1, Licensee agrees to be bound by the terms and conditions of this License Agreement. ACCEPT CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 -------------------------------------------------- Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved. 
Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Stichting Mathematisch Centrum or CWI not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ================================================ FILE: rd/usr/lib/python3.8/__future__.py ================================================ """Record of phased-in incompatible language changes. Each line is of the form: FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," CompilerFlag ")" where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples of the same form as sys.version_info: (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int PY_MINOR_VERSION, # the 1; an int PY_MICRO_VERSION, # the 0; an int PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string PY_RELEASE_SERIAL # the 3; an int ) OptionalRelease records the first release in which from __future__ import FeatureName was accepted. In the case of MandatoryReleases that have not yet occurred, MandatoryRelease predicts the release in which the feature will become part of the language. 
Else MandatoryRelease records when the feature became part of the language; in releases at or after that, modules no longer need from __future__ import FeatureName to use the feature in question, but may continue to use such imports. MandatoryRelease may also be None, meaning that a planned feature got dropped. Instances of class _Feature have two corresponding methods, .getOptionalRelease() and .getMandatoryRelease(). CompilerFlag is the (bitfield) flag that should be passed in the fourth argument to the builtin function compile() to enable the feature in dynamically compiled code. This flag is stored in the .compiler_flag attribute on _Future instances. These values must match the appropriate #defines of CO_xxx flags in Include/compile.h. No feature line is ever to be deleted from this file. """ all_feature_names = [ "nested_scopes", "generators", "division", "absolute_import", "with_statement", "print_function", "unicode_literals", "barry_as_FLUFL", "generator_stop", "annotations", ] __all__ = ["all_feature_names"] + all_feature_names # The CO_xxx symbols are defined here under the same names defined in # code.h and used by compile.h, so that an editor search will find them here. # However, they're not exported in __all__, because they don't really belong to # this module. 
# Bit flags recognized by compile(); values must match the CO_xxx #defines
# in Include/compile.h (see the module docstring above).
CO_NESTED = 0x0010                      # nested_scopes
CO_GENERATOR_ALLOWED = 0                # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x20000            # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x40000     # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x80000      # with statement
CO_FUTURE_PRINT_FUNCTION = 0x100000     # print function
CO_FUTURE_UNICODE_LITERALS = 0x200000   # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x400000
CO_FUTURE_GENERATOR_STOP = 0x800000     # StopIteration becomes RuntimeError in generators
CO_FUTURE_ANNOTATIONS = 0x1000000       # annotations become strings at runtime


class _Feature:
    """Record of one __future__ feature: the release that first accepted
    it, the release where it becomes (or became) mandatory, and the
    compiler flag that enables it in dynamically compiled code.
    """

    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
        self.optional = optionalRelease      # 5-tuple like sys.version_info
        self.mandatory = mandatoryRelease    # 5-tuple, or None if dropped
        self.compiler_flag = compiler_flag   # CO_xxx bit for compile()

    def getOptionalRelease(self):
        """Return first release in which this feature was recognized.

        This is a 5-tuple, of the same form as sys.version_info.
        """
        return self.optional

    def getMandatoryRelease(self):
        """Return release in which this feature will become mandatory.

        This is a 5-tuple, of the same form as sys.version_info, or, if
        the feature was dropped, is None.
        """
        return self.mandatory

    def __repr__(self):
        return "_Feature" + repr((self.optional,
                                  self.mandatory,
                                  self.compiler_flag))


# One _Feature instance per feature name listed in all_feature_names above.
nested_scopes = _Feature((2, 1, 0, "beta", 1),
                         (2, 2, 0, "alpha", 0),
                         CO_NESTED)

generators = _Feature((2, 2, 0, "alpha", 1),
                      (2, 3, 0, "final", 0),
                      CO_GENERATOR_ALLOWED)

division = _Feature((2, 2, 0, "alpha", 2),
                    (3, 0, 0, "alpha", 0),
                    CO_FUTURE_DIVISION)

absolute_import = _Feature((2, 5, 0, "alpha", 1),
                           (3, 0, 0, "alpha", 0),
                           CO_FUTURE_ABSOLUTE_IMPORT)

with_statement = _Feature((2, 5, 0, "alpha", 1),
                          (2, 6, 0, "alpha", 0),
                          CO_FUTURE_WITH_STATEMENT)

print_function = _Feature((2, 6, 0, "alpha", 2),
                          (3, 0, 0, "alpha", 0),
                          CO_FUTURE_PRINT_FUNCTION)

unicode_literals = _Feature((2, 6, 0, "alpha", 2),
                            (3, 0, 0, "alpha", 0),
                            CO_FUTURE_UNICODE_LITERALS)

barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
                          (4, 0, 0, "alpha", 0),
                          CO_FUTURE_BARRY_AS_BDFL)

generator_stop = _Feature((3, 5, 0, "beta", 1),
                          (3, 7, 0, "alpha", 0),
                          CO_FUTURE_GENERATOR_STOP)

annotations = _Feature((3, 7, 0, "beta", 1),
                       (4, 0, 0, "alpha", 0),
                       CO_FUTURE_ANNOTATIONS)


================================================
FILE: rd/usr/lib/python3.8/__phello__.foo.py
================================================
# This file exists as a helper for the test.test_frozen module.


================================================
FILE: rd/usr/lib/python3.8/_bootlocale.py
================================================
"""A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.

Don't import directly from third-party code; use the `locale` module
instead!
"""

import sys
import _locale

if sys.platform.startswith("win"):
    # Windows: the encoding comes from _getdefaultlocale() (index 1 of
    # the (language, encoding) pair), unless UTF-8 mode forces 'UTF-8'.
    def getpreferredencoding(do_setlocale=True):
        if sys.flags.utf8_mode:
            return 'UTF-8'
        return _locale._getdefaultlocale()[1]
else:
    try:
        _locale.CODESET
    except AttributeError:
        # _locale was built without CODESET support.
        if hasattr(sys, 'getandroidapilevel'):
            # On Android langinfo.h and CODESET are missing, and UTF-8 is
            # always used in mbstowcs() and wcstombs().
            def getpreferredencoding(do_setlocale=True):
                return 'UTF-8'
        else:
            def getpreferredencoding(do_setlocale=True):
                if sys.flags.utf8_mode:
                    return 'UTF-8'
                # This path for legacy systems needs the more complex
                # getdefaultlocale() function, import the full locale module.
                import locale
                return locale.getpreferredencoding(do_setlocale)
    else:
        # CODESET is available: ask nl_langinfo for the current charset.
        def getpreferredencoding(do_setlocale=True):
            assert not do_setlocale
            if sys.flags.utf8_mode:
                return 'UTF-8'
            result = _locale.nl_langinfo(_locale.CODESET)
            if not result and sys.platform == 'darwin':
                # nl_langinfo can return an empty string
                # when the setting has an invalid value.
                # Default to UTF-8 in that case because
                # UTF-8 is the default charset on OSX and
                # returning nothing will crash the
                # interpreter.
                result = 'UTF-8'
            return result


================================================
FILE: rd/usr/lib/python3.8/_collections_abc.py
================================================
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.

Unit tests are in test_collections.
"""

from abc import ABCMeta, abstractmethod
import sys

__all__ = ["Awaitable", "Coroutine",
           "AsyncIterable", "AsyncIterator", "AsyncGenerator",
           "Hashable", "Iterable", "Iterator", "Generator", "Reversible",
           "Sized", "Container", "Callable", "Collection",
           "Set", "MutableSet",
           "Mapping", "MutableMapping",
           "MappingView", "KeysView", "ItemsView", "ValuesView",
           "Sequence", "MutableSequence",
           "ByteString",
           ]

# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"

# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
#       it = iter(somebytearray)
#       assert isinstance(it, Iterable)
# Note: in other implementations, these types might not be distinct
# and they may have their own implementation specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
longrange_iterator = type(iter(range(1 << 1000)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))

## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())

## misc ##
mappingproxy = type(type.__dict__)
generator = type((lambda: (yield))())

## coroutine ##
async def _coro(): pass
_coro = _coro()
coroutine = type(_coro)
_coro.close()  # Prevent ResourceWarning

del _coro

## asynchronous generator ##
async def _ag(): yield
_ag = _ag()
async_generator = type(_ag)

del _ag


### ONE-TRICK PONIES ###

def _check_methods(C, *methods):
    """Return True if class *C* resolves every name in *methods* somewhere
    in its MRO; return NotImplemented if any is missing or has been
    explicitly disabled by setting it to None.
    """
    mro = C.__mro__
    for method in methods:
        for B in mro:
            if method in B.__dict__:
                # An attribute set to None blocks the method (and hence
                # duck-typed registration via __subclasshook__).
                if B.__dict__[method] is None:
                    return NotImplemented
                break
        else:
            return NotImplemented
    return True


class Hashable(metaclass=ABCMeta):

    __slots__ = ()

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Hashable:
            return _check_methods(C, "__hash__")
        return NotImplemented


class Awaitable(metaclass=ABCMeta):

    __slots__ = ()

    @abstractmethod
    def __await__(self):
        yield

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Awaitable:
            return _check_methods(C, "__await__")
        return NotImplemented


class Coroutine(Awaitable):
    # Adds send()/throw()/close() on top of Awaitable's __await__.

    __slots__ = ()

    @abstractmethod
    def send(self, value):
        """Send a value into the coroutine.
        Return next yielded value or raise StopIteration.
        """
        raise StopIteration

    @abstractmethod
    def throw(self, typ, val=None, tb=None):
        """Raise an exception in the coroutine.
        Return next yielded value or raise StopIteration.
        """
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val

    def close(self):
        """Raise GeneratorExit inside coroutine.
        """
        try:
            self.throw(GeneratorExit)
        except (GeneratorExit, StopIteration):
            pass
        else:
            # A well-behaved coroutine must not yield after GeneratorExit.
            raise RuntimeError("coroutine ignored GeneratorExit")

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Coroutine:
            return _check_methods(C, '__await__', 'send', 'throw', 'close')
        return NotImplemented


# The built-in coroutine type (captured above) satisfies this ABC.
Coroutine.register(coroutine)


class AsyncIterable(metaclass=ABCMeta):

    __slots__ = ()

    @abstractmethod
    def __aiter__(self):
        return AsyncIterator()

    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncIterable:
            return _check_methods(C, "__aiter__")
        return NotImplemented


class AsyncIterator(AsyncIterable):

    __slots__ = ()

    @abstractmethod
    async def __anext__(self):
        """Return the next item or raise StopAsyncIteration when exhausted."""
        raise StopAsyncIteration

    def __aiter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncIterator:
            return _check_methods(C, "__anext__", "__aiter__")
        return NotImplemented


class AsyncGenerator(AsyncIterator):
    # Adds asend()/athrow()/aclose(), mirroring Generator for async code.

    __slots__ = ()

    async def __anext__(self):
        """Return the next item from the asynchronous generator.
        When exhausted, raise StopAsyncIteration.
        """
        return await self.asend(None)

    @abstractmethod
    async def asend(self, value):
        """Send a value into the asynchronous generator.
        Return next yielded value or raise StopAsyncIteration.
        """
        raise StopAsyncIteration

    @abstractmethod
    async def athrow(self, typ, val=None, tb=None):
        """Raise an exception in the asynchronous generator.
        Return next yielded value or raise StopAsyncIteration.
        """
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val

    async def aclose(self):
        """Raise GeneratorExit inside the asynchronous generator.
        """
        try:
            await self.athrow(GeneratorExit)
        except (GeneratorExit, StopAsyncIteration):
            pass
        else:
            raise RuntimeError("asynchronous generator ignored GeneratorExit")

    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncGenerator:
            return _check_methods(C, '__aiter__', '__anext__',
                                  'asend', 'athrow', 'aclose')
        return NotImplemented


AsyncGenerator.register(async_generator)


class Iterable(metaclass=ABCMeta):

    __slots__ = ()

    @abstractmethod
    def __iter__(self):
        # Abstract generator body: yields nothing, but makes this a
        # generator function so subclasses may call super().__iter__().
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            return _check_methods(C, "__iter__")
        return NotImplemented


class Iterator(Iterable):

    __slots__ = ()

    @abstractmethod
    def __next__(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            return _check_methods(C, '__iter__', '__next__')
        return NotImplemented


# Register the concrete built-in iterator types captured at module top.
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(longrange_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)


class Reversible(Iterable):

    __slots__ = ()

    @abstractmethod
    def __reversed__(self):
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Reversible:
            return _check_methods(C, "__reversed__", "__iter__")
        return NotImplemented


class Generator(Iterator):
    # Adds send()/throw()/close() on top of Iterator's __next__.

    __slots__ = ()

    def __next__(self):
        """Return the next item from the generator.
        When exhausted, raise StopIteration.
        """
        return self.send(None)

    @abstractmethod
    def send(self, value):
        """Send a value into the generator.
        Return next yielded value or raise StopIteration.
""" raise StopIteration @abstractmethod def throw(self, typ, val=None, tb=None): """Raise an exception in the generator. Return next yielded value or raise StopIteration. """ if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val def close(self): """Raise GeneratorExit inside generator. """ try: self.throw(GeneratorExit) except (GeneratorExit, StopIteration): pass else: raise RuntimeError("generator ignored GeneratorExit") @classmethod def __subclasshook__(cls, C): if cls is Generator: return _check_methods(C, '__iter__', '__next__', 'send', 'throw', 'close') return NotImplemented Generator.register(generator) class Sized(metaclass=ABCMeta): __slots__ = () @abstractmethod def __len__(self): return 0 @classmethod def __subclasshook__(cls, C): if cls is Sized: return _check_methods(C, "__len__") return NotImplemented class Container(metaclass=ABCMeta): __slots__ = () @abstractmethod def __contains__(self, x): return False @classmethod def __subclasshook__(cls, C): if cls is Container: return _check_methods(C, "__contains__") return NotImplemented class Collection(Sized, Iterable, Container): __slots__ = () @classmethod def __subclasshook__(cls, C): if cls is Collection: return _check_methods(C, "__len__", "__iter__", "__contains__") return NotImplemented class Callable(metaclass=ABCMeta): __slots__ = () @abstractmethod def __call__(self, *args, **kwds): return False @classmethod def __subclasshook__(cls, C): if cls is Callable: return _check_methods(C, "__call__") return NotImplemented ### SETS ### class Set(Collection): """A set is a finite, iterable container. This class provides concrete generic implementations of all methods except for __contains__, __iter__ and __len__. To override the comparisons (presumably for speed, as the semantics are fixed), redefine __le__ and __ge__, then the other operations will automatically follow suit. 
""" __slots__ = () def __le__(self, other): if not isinstance(other, Set): return NotImplemented if len(self) > len(other): return False for elem in self: if elem not in other: return False return True def __lt__(self, other): if not isinstance(other, Set): return NotImplemented return len(self) < len(other) and self.__le__(other) def __gt__(self, other): if not isinstance(other, Set): return NotImplemented return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented if len(self) < len(other): return False for elem in other: if elem not in self: return False return True def __eq__(self, other): if not isinstance(other, Set): return NotImplemented return len(self) == len(other) and self.__le__(other) @classmethod def _from_iterable(cls, it): '''Construct an instance of the class from any iterable input. Must override this method if the class constructor signature does not accept an iterable for an input. ''' return cls(it) def __and__(self, other): if not isinstance(other, Iterable): return NotImplemented return self._from_iterable(value for value in other if value in self) __rand__ = __and__ def isdisjoint(self, other): 'Return True if two sets have a null intersection.' 
for value in other: if value in self: return False return True def __or__(self, other): if not isinstance(other, Iterable): return NotImplemented chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) __ror__ = __or__ def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): return NotImplemented other = self._from_iterable(other) return self._from_iterable(value for value in self if value not in other) def __rsub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): return NotImplemented other = self._from_iterable(other) return self._from_iterable(value for value in other if value not in self) def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): return NotImplemented other = self._from_iterable(other) return (self - other) | (other - self) __rxor__ = __xor__ def _hash(self): """Compute the hash value of a set. Note that we don't define __hash__: not all sets are hashable. But if you define a hashable set type, its __hash__ should call this function. This must be compatible __eq__. All sets ought to compare equal if they contain the same elements, regardless of how they are implemented, and regardless of the order of the elements; so there's not much freedom for __eq__ or __hash__. We match the algorithm used by the built-in frozenset type. """ MAX = sys.maxsize MASK = 2 * MAX + 1 n = len(self) h = 1927868237 * (n + 1) h &= MASK for x in self: hx = hash(x) h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 h &= MASK h = h * 69069 + 907133923 h &= MASK if h > MAX: h -= MASK + 1 if h == -1: h = 590923713 return h Set.register(frozenset) class MutableSet(Set): """A mutable set is a finite, iterable container. This class provides concrete generic implementations of all methods except for __contains__, __iter__, __len__, add(), and discard(). 
To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    __slots__ = ()

    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
        """Remove an element.  Do not raise an exception if absent."""
        raise NotImplementedError

    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)

    def pop(self):
        """Return the popped value.  Raise KeyError if empty."""
        # Grab an arbitrary element via iteration, then remove it with
        # discard() (the only mutating primitive subclasses must provide).
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError from None
        self.discard(value)
        return value

    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass

    def __ior__(self, it):
        # In-place union: add every element of the iterable.
        for value in it:
            self.add(value)
        return self

    def __iand__(self, it):
        # In-place intersection.  (self - it) is materialized first so we
        # never mutate the set while iterating over it.
        for value in (self - it):
            self.discard(value)
        return self

    def __ixor__(self, it):
        # In-place symmetric difference.
        if it is self:
            # s ^= s is always empty.
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self

    def __isub__(self, it):
        # In-place difference.
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self

MutableSet.register(set)


### MAPPINGS ###

class Mapping(Collection):

    __slots__ = ()

    # NOTE(review): the string below is *not* a docstring -- it follows
    # __slots__, so it is a plain expression statement (upstream quirk).
    """A Mapping is a generic container for associating key/value
    pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.

    """

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # EAFP: a failed lookup (KeyError) means the key is absent.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def keys(self):
        "D.keys() -> a set-like object providing a view on D's keys"
        return KeysView(self)

    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return ItemsView(self)

    def values(self):
        "D.values() -> an object providing a view on D's values"
        return ValuesView(self)

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    # Mappings are not reversible by default.
    __reversed__ = None

Mapping.register(mappingproxy)


class MappingView(Sized):
    # A view holds only a reference back to the mapping it presents.

    __slots__ = '_mapping',

    def __init__(self, mapping):
        self._mapping = mapping

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)


class KeysView(MappingView, Set):

    __slots__ = ()

    @classmethod
    def _from_iterable(self, it):
        # Set operations between views produce plain sets.
        return set(it)

    def __contains__(self, key):
        return key in self._mapping

    def __iter__(self):
        yield from self._mapping

KeysView.register(dict_keys)


class ItemsView(MappingView, Set):

    __slots__ = ()

    @classmethod
    def _from_iterable(self, it):
        return set(it)

    def __contains__(self, item):
        key, value = item
        try:
            v = self._mapping[key]
        except KeyError:
            return False
        else:
            # Identity check first so values that are not equal to
            # themselves (e.g. NaN) still match.
            return v is value or v == value

    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])

ItemsView.register(dict_items)


class ValuesView(MappingView, Collection):

    __slots__ = ()

    def __contains__(self, value):
        # Linear scan: values are not indexed anywhere.
        for key in self._mapping:
            v = self._mapping[key]
            if v is value or v == value:
                return True
        return False

    def __iter__(self):
        for key in self._mapping:
            yield self._mapping[key]

ValuesView.register(dict_values)


class MutableMapping(Mapping):

    __slots__ = ()

    # NOTE(review): not a real docstring (follows __slots__); kept as-is.
    """A MutableMapping is a generic container for associating
    key/value pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.

    """

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Sentinel distinguishing "no default supplied" from default=None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
          If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
           as a 2-tuple; but raise KeyError if D is empty.
        '''
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError from None
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        'D.clear() -> None.  Remove all items from D.'
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(self, other=(), /, **kwds):
        ''' D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.
        If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
        In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            # Duck-typed mapping without being a registered Mapping.
            for key in other.keys():
                self[key] = other[key]
        else:
            # Iterable of (key, value) pairs.
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

MutableMapping.register(dict)


### SEQUENCES ###

class Sequence(Reversible, Collection):

    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    __slots__ = ()

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        # Iterate by integer index until __getitem__ raises IndexError.
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return

    def __contains__(self, value):
        for v in self:
            if v is value or v == value:
                return True
        return False

    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]

    def index(self, value, start=0, stop=None):
        '''S.index(value, [start, [stop]]) -> integer -- return first index of value.
           Raises ValueError if the value is not present.

           Supporting start and stop arguments is optional, but
           recommended.
        '''
        # Negative bounds are interpreted relative to the end, like slices.
        if start is not None and start < 0:
            start = max(len(self) + start, 0)
        if stop is not None and stop < 0:
            stop += len(self)

        i = start
        while stop is None or i < stop:
            try:
                v = self[i]
                if v is value or v == value:
                    return i
            except IndexError:
                # Ran off the end of the sequence before reaching ``stop``.
                break
            i += 1
        raise ValueError

    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        return sum(1 for v in self if v is value or v == value)

Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)


class ByteString(Sequence):

    """This unifies bytes and bytearray.

    XXX Should add all their methods.
    """

    __slots__ = ()

ByteString.register(bytes)
ByteString.register(bytearray)


class MutableSequence(Sequence):

    __slots__ = ()

    # NOTE(review): not a real docstring (follows __slots__); kept as-is.
    """All the operations on a read-write sequence.

    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().
    """

    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, value) -- insert value before index'
        raise IndexError

    def append(self, value):
        'S.append(value) -- append value to the end of the sequence'
        self.insert(len(self), value)

    def clear(self):
        'S.clear() -> None -- remove all items from S'
        try:
            while True:
                self.pop()
        except IndexError:
            pass

    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        n = len(self)
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]

    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        # Snapshot first so ``s.extend(s)`` terminates.
        if values is self:
            values = list(values)
        for v in values:
            self.append(v)

    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
           Raise IndexError if list is empty or index is out of range.
        '''
        v = self[index]
        del self[index]
        return v

    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
           Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)
MutableSequence.register(bytearray)  # Multiply inheriting, see ByteString



================================================
FILE: rd/usr/lib/python3.8/_compat_pickle.py
================================================
# This module is used to map the old Python 2 names to the new names used in
# Python 3 for the pickle module.  This needed to make pickle streams
# generated with Python 2 loadable by Python 3.

# This is a copy of lib2to3.fixes.fix_imports.MAPPING.  We cannot import
# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
# Thus, this could cause the module to be imported recursively.
IMPORT_MAPPING = { '__builtin__' : 'builtins', 'copy_reg': 'copyreg', 'Queue': 'queue', 'SocketServer': 'socketserver', 'ConfigParser': 'configparser', 'repr': 'reprlib', 'tkFileDialog': 'tkinter.filedialog', 'tkSimpleDialog': 'tkinter.simpledialog', 'tkColorChooser': 'tkinter.colorchooser', 'tkCommonDialog': 'tkinter.commondialog', 'Dialog': 'tkinter.dialog', 'Tkdnd': 'tkinter.dnd', 'tkFont': 'tkinter.font', 'tkMessageBox': 'tkinter.messagebox', 'ScrolledText': 'tkinter.scrolledtext', 'Tkconstants': 'tkinter.constants', 'Tix': 'tkinter.tix', 'ttk': 'tkinter.ttk', 'Tkinter': 'tkinter', 'markupbase': '_markupbase', '_winreg': 'winreg', 'thread': '_thread', 'dummy_thread': '_dummy_thread', 'dbhash': 'dbm.bsd', 'dumbdbm': 'dbm.dumb', 'dbm': 'dbm.ndbm', 'gdbm': 'dbm.gnu', 'xmlrpclib': 'xmlrpc.client', 'SimpleXMLRPCServer': 'xmlrpc.server', 'httplib': 'http.client', 'htmlentitydefs' : 'html.entities', 'HTMLParser' : 'html.parser', 'Cookie': 'http.cookies', 'cookielib': 'http.cookiejar', 'BaseHTTPServer': 'http.server', 'test.test_support': 'test.support', 'commands': 'subprocess', 'urlparse' : 'urllib.parse', 'robotparser' : 'urllib.robotparser', 'urllib2': 'urllib.request', 'anydbm': 'dbm', '_abcoll' : 'collections.abc', } # This contains rename rules that are easy to handle. We ignore the more # complex stuff (e.g. mapping the names in the urllib and types modules). # These rules should be run before import names are fixed. 
# (Python 2 module, Python 2 name) -> (Python 3 module, Python 3 name).
# These are mutual mappings: REVERSE_NAME_MAPPING below is derived from them.
NAME_MAPPING = {
    ('__builtin__', 'xrange'):     ('builtins', 'range'),
    ('__builtin__', 'reduce'):     ('functools', 'reduce'),
    ('__builtin__', 'intern'):     ('sys', 'intern'),
    ('__builtin__', 'unichr'):     ('builtins', 'chr'),
    ('__builtin__', 'unicode'):    ('builtins', 'str'),
    ('__builtin__', 'long'):       ('builtins', 'int'),
    ('itertools', 'izip'):         ('builtins', 'zip'),
    ('itertools', 'imap'):         ('builtins', 'map'),
    ('itertools', 'ifilter'):      ('builtins', 'filter'),
    ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
    ('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
    ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
    ('UserList', 'UserList'): ('collections', 'UserList'),
    ('UserString', 'UserString'): ('collections', 'UserString'),
    ('whichdb', 'whichdb'): ('dbm', 'whichdb'),
    ('_socket', 'fromfd'): ('socket', 'fromfd'),
    ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
    ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
    ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
    ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
    ('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
    ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
    ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
    ('urllib', 'quote'): ('urllib.parse', 'quote'),
    ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
    ('urllib', 'unquote'): ('urllib.parse', 'unquote'),
    ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
    ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
    ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
    ('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
    ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
    ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
    ('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}

# Exceptions that lived in the Python 2 ``exceptions`` module and now live
# in ``builtins``.
PYTHON2_EXCEPTIONS = (
    "ArithmeticError",
    "AssertionError",
    "AttributeError",
    "BaseException",
    "BufferError",
    "BytesWarning",
    "DeprecationWarning",
    "EOFError",
    "EnvironmentError",
    "Exception",
    "FloatingPointError",
    "FutureWarning",
    "GeneratorExit",
    "IOError",
    "ImportError",
    "ImportWarning",
    "IndentationError",
    "IndexError",
    "KeyError",
    "KeyboardInterrupt",
    "LookupError",
    "MemoryError",
    "NameError",
    "NotImplementedError",
    "OSError",
    "OverflowError",
    "PendingDeprecationWarning",
    "ReferenceError",
    "RuntimeError",
    "RuntimeWarning",
    # StandardError is gone in Python 3, so we map it to Exception
    "StopIteration",
    "SyntaxError",
    "SyntaxWarning",
    "SystemError",
    "SystemExit",
    "TabError",
    "TypeError",
    "UnboundLocalError",
    "UnicodeDecodeError",
    "UnicodeEncodeError",
    "UnicodeError",
    "UnicodeTranslateError",
    "UnicodeWarning",
    "UserWarning",
    "ValueError",
    "Warning",
    "ZeroDivisionError",
)

# WindowsError only exists on Windows builds; include it when present.
try:
    WindowsError
except NameError:
    pass
else:
    PYTHON2_EXCEPTIONS += ("WindowsError",)

for excname in PYTHON2_EXCEPTIONS:
    NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)

MULTIPROCESSING_EXCEPTIONS = (
    'AuthenticationError',
    'BufferTooShort',
    'ProcessError',
    'TimeoutError',
)

for excname in MULTIPROCESSING_EXCEPTIONS:
    NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)

# Same, but for 3.x to 2.x
REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
# The asserts guarantee the mutual mappings are one-to-one (no value
# collisions silently dropped by the dict construction above).
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)

# Non-mutual mappings.
# One-way (3.x-loading-2.x only) additions; deliberately NOT reversed.
IMPORT_MAPPING.update({
    'cPickle': 'pickle',
    '_elementtree': 'xml.etree.ElementTree',
    'FileDialog': 'tkinter.filedialog',
    'SimpleDialog': 'tkinter.simpledialog',
    'DocXMLRPCServer': 'xmlrpc.server',
    'SimpleHTTPServer': 'http.server',
    'CGIHTTPServer': 'http.server',
    # For compatibility with broken pickles saved in old Python 3 versions
    'UserDict': 'collections',
    'UserList': 'collections',
    'UserString': 'collections',
    'whichdb': 'dbm',
    'StringIO':  'io',
    'cStringIO': 'io',
})

REVERSE_IMPORT_MAPPING.update({
    '_bz2': 'bz2',
    '_dbm': 'dbm',
    '_functools': 'functools',
    '_gdbm': 'gdbm',
    '_pickle': 'pickle',
})

NAME_MAPPING.update({
    ('__builtin__', 'basestring'): ('builtins', 'str'),
    ('exceptions', 'StandardError'): ('builtins', 'Exception'),
    ('UserDict', 'UserDict'): ('collections', 'UserDict'),
    ('socket', '_socketobject'): ('socket', 'SocketType'),
})

REVERSE_NAME_MAPPING.update({
    ('_functools', 'reduce'): ('__builtin__', 'reduce'),
    ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
    ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
    ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
    ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
    ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
    ('xmlrpc.server', 'XMLRPCDocGenerator'):
        ('DocXMLRPCServer', 'XMLRPCDocGenerator'),
    ('xmlrpc.server', 'DocXMLRPCRequestHandler'):
        ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
    ('xmlrpc.server', 'DocXMLRPCServer'):
        ('DocXMLRPCServer', 'DocXMLRPCServer'),
    ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
        ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
    ('http.server', 'SimpleHTTPRequestHandler'):
        ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
    ('http.server', 'CGIHTTPRequestHandler'):
        ('CGIHTTPServer', 'CGIHTTPRequestHandler'),
    ('_socket', 'socket'): ('socket', '_socketobject'),
})

# Python 3 OSError subclasses collapse back to plain OSError for Python 2.
PYTHON3_OSERROR_EXCEPTIONS = (
    'BrokenPipeError',
    'ChildProcessError',
    'ConnectionAbortedError',
    'ConnectionError',
    'ConnectionRefusedError',
    'ConnectionResetError',
    'FileExistsError',
    'FileNotFoundError',
    'InterruptedError',
    'IsADirectoryError',
    'NotADirectoryError',
    'PermissionError',
    'ProcessLookupError',
    'TimeoutError',
)

for excname in PYTHON3_OSERROR_EXCEPTIONS:
    REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')

# Likewise, ModuleNotFoundError collapses to ImportError for Python 2.
PYTHON3_IMPORTERROR_EXCEPTIONS = (
    'ModuleNotFoundError',
)

for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
    REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')


================================================
FILE: rd/usr/lib/python3.8/_compression.py
================================================
"""Internal classes used by the gzip, lzma and bz2 modules"""

import io


BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE  # Compressed data read chunk size


class BaseStream(io.BufferedIOBase):
    """Mode-checking helper functions."""

    def _check_not_closed(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def _check_can_read(self):
        if not self.readable():
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        if not self.writable():
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        if not self.readable():
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")
        if not self.seekable():
            raise io.UnsupportedOperation("The underlying file object "
                                          "does not support seeking")


class DecompressReader(io.RawIOBase):
    """Adapts the decompressor API to a RawIOBase reader API"""

    def readable(self):
        return True

    def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args):
        self._fp = fp
        self._eof = False
        self._pos = 0  # Current offset in decompressed stream

        # Set to size of decompressed stream once it is known, for SEEK_END
        self._size = -1

        # Save the decompressor factory and arguments.
        # If the file contains multiple compressed streams, each
        # stream will need a separate decompressor object. A new decompressor
        # object is also needed when implementing a backwards seek().
        self._decomp_factory = decomp_factory
        self._decomp_args = decomp_args
        self._decompressor = self._decomp_factory(**self._decomp_args)

        # Exception class to catch from decompressor signifying invalid
        # trailing data to ignore
        self._trailing_error = trailing_error

    def close(self):
        # Drop the decompressor (and its internal buffers) eagerly.
        self._decompressor = None
        return super().close()

    def seekable(self):
        return self._fp.seekable()

    def readinto(self, b):
        # Cast to a flat byte view so any buffer shape/format is accepted.
        with memoryview(b) as view, view.cast("B") as byte_view:
            data = self.read(len(byte_view))
            byte_view[:len(data)] = data
        return len(data)

    def read(self, size=-1):
        if size < 0:
            return self.readall()

        if not size or self._eof:
            return b""
        data = None  # Default if EOF is encountered
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while True:
            if self._decompressor.eof:
                rawblock = (self._decompressor.unused_data or
                            self._fp.read(BUFFER_SIZE))
                if not rawblock:
                    break
                # Continue to next stream.
                self._decompressor = self._decomp_factory(
                    **self._decomp_args)
                try:
                    data = self._decompressor.decompress(rawblock, size)
                except self._trailing_error:
                    # Trailing data isn't a valid compressed stream; ignore it.
                    break
            else:
                if self._decompressor.needs_input:
                    rawblock = self._fp.read(BUFFER_SIZE)
                    if not rawblock:
                        raise EOFError("Compressed file ended before the "
                                       "end-of-stream marker was reached")
                else:
                    rawblock = b""
                data = self._decompressor.decompress(rawblock, size)
            if data:
                break
        if not data:
            # Genuine end of stream: remember the total size for SEEK_END.
            self._eof = True
            self._size = self._pos
            return b""
        self._pos += len(data)
        return data

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0)
        self._eof = False
        self._pos = 0
        self._decompressor = self._decomp_factory(**self._decomp_args)

    def seek(self, offset, whence=io.SEEK_SET):
        # Recalculate offset as an absolute file position.
        if whence == io.SEEK_SET:
            pass
        elif whence == io.SEEK_CUR:
            offset = self._pos + offset
        elif whence == io.SEEK_END:
            # Seeking relative to EOF - we need to know the file's size.
            if self._size < 0:
                # Decompress to the end once to learn the total size.
                while self.read(io.DEFAULT_BUFFER_SIZE):
                    pass
            offset = self._size + offset
        else:
            raise ValueError("Invalid value for whence: {}".format(whence))

        # Make it so that offset is the number of bytes to skip forward.
        if offset < self._pos:
            # Backwards seek: restart decompression from the beginning.
            self._rewind()
        else:
            offset -= self._pos

        # Read and discard data until we reach the desired position.
        while offset > 0:
            data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset))
            if not data:
                break
            offset -= len(data)

        return self._pos

    def tell(self):
        """Return the current file position."""
        return self._pos


================================================
FILE: rd/usr/lib/python3.8/_dummy_thread.py
================================================
"""Drop-in replacement for the thread module.

Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.

Suggested usage is::

    try:
        import _thread
    except ImportError:
        import _dummy_thread as _thread

"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
           'interrupt_main', 'LockType', 'RLock']

# A dummy value
TIMEOUT_MAX = 2**31

# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided.  Instead, all
# imports are done when needed on a function-by-function basis.  Since threads
# are disabled, the import lock should not be an issue anyway (??).

error = RuntimeError


def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of _thread.start_new_thread().

    Compatibility is maintained by making sure that ``args`` is a
    tuple and ``kwargs`` is a dictionary.  If an exception is raised
    and it is SystemExit (which can be done by _thread.exit()) it is
    caught and nothing is done; all other exceptions are printed out
    by using traceback.print_exc().

    If the executed function calls interrupt_main the KeyboardInterrupt will be
    raised when the function returns.

    """
    # NOTE(review): the mutable default ``kwargs={}`` mirrors the _thread
    # API; it is never mutated here, so sharing the default is harmless.
    if type(args) != type(tuple()):
        raise TypeError("2nd arg must be a tuple")
    if type(kwargs) != type(dict()):
        raise TypeError("3rd arg must be a dict")
    global _main
    _main = False
    try:
        # The "thread" runs synchronously in the caller.
        function(*args, **kwargs)
    except SystemExit:
        pass
    except:
        import traceback
        traceback.print_exc()
    _main = True
    global _interrupt
    if _interrupt:
        _interrupt = False
        raise KeyboardInterrupt


def exit():
    """Dummy implementation of _thread.exit()."""
    raise SystemExit


def get_ident():
    """Dummy implementation of _thread.get_ident().

    Since this module should only be used when _threadmodule is not
    available, it is safe to assume that the current process is the
    only thread.  Thus a constant can be safely returned.
    """
    return 1


def allocate_lock():
    """Dummy implementation of _thread.allocate_lock()."""
    return LockType()


def stack_size(size=None):
    """Dummy implementation of _thread.stack_size()."""
    if size is not None:
        raise error("setting thread stack size not supported")
    return 0


def _set_sentinel():
    """Dummy implementation of _thread._set_sentinel()."""
    return LockType()


class LockType(object):
    """Class implementing dummy implementation of _thread.LockType.

    Compatibility is maintained by maintaining self.locked_status
    which is a boolean that stores the state of the lock.  Pickling of
    the lock, though, should not be done since if the _thread module is
    then used with an unpickled ``lock()`` from here problems could
    occur from this class not having atomic methods.

    """

    def __init__(self):
        self.locked_status = False

    def acquire(self, waitflag=None, timeout=-1):
        """Dummy implementation of acquire().

        For blocking calls, self.locked_status is automatically set to
        True and returned appropriately based on value of
        ``waitflag``.  If it is non-blocking, then the value is
        actually checked and not set if it is already acquired.  This
        is all done so that threading.Condition's assert statements
        aren't triggered and throw a little fit.

        """
        if waitflag is None or waitflag:
            # Blocking acquire always "succeeds" -- there is only one thread.
            self.locked_status = True
            return True
        else:
            if not self.locked_status:
                self.locked_status = True
                return True
            else:
                # Non-blocking acquire of a held lock: honour the timeout,
                # then fail -- no other thread exists to release it.
                if timeout > 0:
                    import time
                    time.sleep(timeout)
                return False

    __enter__ = acquire

    def __exit__(self, typ, val, tb):
        self.release()

    def release(self):
        """Release the dummy lock."""
        # XXX Perhaps shouldn't actually bother to test?  Could lead
        # to problems for complex, threaded code.
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True

    def locked(self):
        return self.locked_status

    def __repr__(self):
        return "<%s %s.%s object at %s>" % (
            "locked" if self.locked_status else "unlocked",
            self.__class__.__module__,
            self.__class__.__qualname__,
            hex(id(self))
        )


class RLock(LockType):
    """Dummy implementation of threading._RLock.

    Re-entrant lock can be acquired multiple times and needs to be
    released just as many times.  This dummy implementation does not
    check whether the current thread actually owns the lock, but does
    accounting on the call counts.

    """

    def __init__(self):
        super().__init__()
        self._levels = 0  # Re-entrancy depth.

    def acquire(self, waitflag=None, timeout=-1):
        """Acquire the lock, can be called multiple times in succession.
        """
        locked = super().acquire(waitflag, timeout)
        if locked:
            self._levels += 1
        return locked

    def release(self):
        """Release needs to be called once for every call to acquire().
        """
        if self._levels == 0:
            raise error
        if self._levels == 1:
            # Last level: actually release the underlying lock.
            super().release()
        self._levels -= 1


# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True


def interrupt_main():
    """Set _interrupt flag to True to have start_new_thread raise
    KeyboardInterrupt upon exiting."""
    if _main:
        raise KeyboardInterrupt
    else:
        global _interrupt
        _interrupt = True


================================================
FILE: rd/usr/lib/python3.8/_markupbase.py
================================================
"""Shared support for scanning document type declarations in HTML and XHTML.

This module is used as a foundation for the html.parser module.  It has no
documented public API and should not be used directly.

"""

import re

_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')

# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf

_msmarkedsectionclose = re.compile(r']\s*>')

del re


class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers."""

    def __init__(self):
        # This class is abstract: it only makes sense as a base class.
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")

    def error(self, message):
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
def updatepos(self, i, j): if i >= j: return j rawdata = self.rawdata nlines = rawdata.count("\n", i, j) if nlines: self.lineno = self.lineno + nlines pos = rawdata.rindex("\n", i, j) # Should not fail self.offset = j-(pos+1) else: self.offset = self.offset + j-i return j _decl_otherchars = '' # Internal -- parse declaration (for use by subclasses). def parse_declaration(self, i): # This is some sort of declaration; in "HTML as # deployed," this should only be the document type # declaration (""). # ISO 8879:1986, however, has more complex # declaration syntax for elements in , including: # --comment-- # [marked section] # name in the following list: ENTITY, DOCTYPE, ELEMENT, # ATTLIST, NOTATION, SHORTREF, USEMAP, # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM rawdata = self.rawdata j = i + 2 assert rawdata[i:j] == "": # the empty comment return j + 1 if rawdata[j:j+1] in ("-", ""): # Start of comment followed by buffer boundary, # or just a buffer boundary. return -1 # A simple, practical version could look like: ((name|stringlit) S*) + '>' n = len(rawdata) if rawdata[j:j+2] == '--': #comment # Locate --.*-- as the body of the comment return self.parse_comment(i) elif rawdata[j] == '[': #marked section # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA # Note that this is extended by Microsoft Office "Save as Web" function # to include [if...] and [endif]. return self.parse_marked_section(i) else: #all other declaration elements decltype, j = self._scan_name(j, i) if j < 0: return j if decltype == "doctype": self._decl_otherchars = '' while j < n: c = rawdata[j] if c == ">": # end of declaration syntax data = rawdata[i+2:j] if decltype == "doctype": self.handle_decl(data) else: # According to the HTML5 specs sections "8.2.4.44 Bogus # comment state" and "8.2.4.45 Markup declaration open # state", a comment token should be emitted. 
# Calling unknown_decl provides more flexibility though. self.unknown_decl(data) return j + 1 if c in "\"'": m = _declstringlit_match(rawdata, j) if not m: return -1 # incomplete j = m.end() elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": name, j = self._scan_name(j, i) elif c in self._decl_otherchars: j = j + 1 elif c == "[": # this could be handled in a separate doctype parser if decltype == "doctype": j = self._parse_doctype_subset(j + 1, i) elif decltype in {"attlist", "linktype", "link", "element"}: # must tolerate []'d groups in a content model in an element declaration # also in data attribute specifications of attlist declaration # also link type declaration subsets in linktype declarations # also link attribute specification lists in link declarations self.error("unsupported '[' char in %s declaration" % decltype) else: self.error("unexpected '[' char in declaration") else: self.error( "unexpected %r char in declaration" % rawdata[j]) if j < 0: return j return -1 # incomplete # Internal -- parse a marked section # Override this to handle MS-word extension syntax content def parse_marked_section(self, i, report=1): rawdata= self.rawdata assert rawdata[i:i+3] == ' ending match= _markedsectionclose.search(rawdata, i+3) elif sectName in {"if", "else", "endif"}: # look for MS Office ]> ending match= _msmarkedsectionclose.search(rawdata, i+3) else: self.error('unknown status keyword %r in marked section' % rawdata[i+3:j]) if not match: return -1 if report: j = match.start(0) self.unknown_decl(rawdata[i+3: j]) return match.end(0) # Internal -- parse comment, return length or -1 if not terminated def parse_comment(self, i, report=1): rawdata = self.rawdata if rawdata[i:i+4] != ' --> --> ''' __UNDEF__ = [] # a special sentinel object def small(text): if text: return '' + text + '' else: return '' def strong(text): if text: return '' + text + '' else: return '' def grey(text): if text: return '' + text + '' else: return '' def lookup(name, frame, 
locals): """Find the value for a given name in the given environment.""" if name in locals: return 'local', locals[name] if name in frame.f_globals: return 'global', frame.f_globals[name] if '__builtins__' in frame.f_globals: builtins = frame.f_globals['__builtins__'] if type(builtins) is type({}): if name in builtins: return 'builtin', builtins[name] else: if hasattr(builtins, name): return 'builtin', getattr(builtins, name) return None, __UNDEF__ def scanvars(reader, frame, locals): """Scan one logical line of Python and look up values of variables used.""" vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__ for ttype, token, start, end, line in tokenize.generate_tokens(reader): if ttype == tokenize.NEWLINE: break if ttype == tokenize.NAME and token not in keyword.kwlist: if lasttoken == '.': if parent is not __UNDEF__: value = getattr(parent, token, __UNDEF__) vars.append((prefix + token, prefix, value)) else: where, value = lookup(token, frame, locals) vars.append((token, where, value)) elif token == '.': prefix += lasttoken + '.' parent = value else: parent, prefix = None, '' lasttoken = token return vars def html(einfo, context=5): """Return a nice HTML document describing a given traceback.""" etype, evalue, etb = einfo if isinstance(etype, type): etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = '' + pydoc.html.heading( '%s' % strong(pydoc.html.escape(str(etype))), '#ffffff', '#6622aa', pyver + '
' + date) + '''

A problem occurred in a Python script. Here is the sequence of function calls leading up to the error, in the order they occurred.

''' indent = '' + small(' ' * 5) + ' ' frames = [] records = inspect.getinnerframes(etb, context) for frame, file, lnum, func, lines, index in records: if file: file = os.path.abspath(file) link = '%s' % (file, pydoc.html.escape(file)) else: file = link = '?' args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': call = 'in ' + strong(pydoc.html.escape(func)) if func != "": call += inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '=' + pydoc.html.repr(value)) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 vars = scanvars(reader, frame, locals) rows = ['%s%s %s' % (' ', link, call)] if index is not None: i = lnum - index for line in lines: num = small(' ' * (5-len(str(i))) + str(i)) + ' ' if i in highlight: line = '=>%s%s' % (num, pydoc.html.preformat(line)) rows.append('%s' % line) else: line = '  %s%s' % (num, pydoc.html.preformat(line)) rows.append('%s' % grey(line)) i += 1 done, dump = {}, [] for name, where, value in vars: if name in done: continue done[name] = 1 if value is not __UNDEF__: if where in ('global', 'builtin'): name = ('%s ' % where) + strong(name) elif where == 'local': name = strong(name) else: name = where + strong(name.split('.')[-1]) dump.append('%s = %s' % (name, pydoc.html.repr(value))) else: dump.append(name + ' undefined') rows.append('%s' % small(grey(', '.join(dump)))) frames.append(''' %s
''' % '\n'.join(rows)) exception = ['

%s: %s' % (strong(pydoc.html.escape(str(etype))), pydoc.html.escape(str(evalue)))] for name in dir(evalue): if name[:1] == '_': continue value = pydoc.html.repr(getattr(evalue, name)) exception.append('\n
%s%s =\n%s' % (indent, name, value)) return head + ''.join(frames) + ''.join(exception) + ''' ''' % pydoc.html.escape( ''.join(traceback.format_exception(etype, evalue, etb))) def text(einfo, context=5): """Return a plain text document describing a given traceback.""" etype, evalue, etb = einfo if isinstance(etype, type): etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' A problem occurred in a Python script. Here is the sequence of function calls leading up to the error, in the order they occurred. ''' frames = [] records = inspect.getinnerframes(etb, context) for frame, file, lnum, func, lines, index in records: file = file and os.path.abspath(file) or '?' args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': call = 'in ' + func if func != "": call += inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '=' + pydoc.text.repr(value)) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 vars = scanvars(reader, frame, locals) rows = [' %s %s' % (file, call)] if index is not None: i = lnum - index for line in lines: num = '%5d ' % i rows.append(num+line.rstrip()) i += 1 done, dump = {}, [] for name, where, value in vars: if name in done: continue done[name] = 1 if value is not __UNDEF__: if where == 'global': name = 'global ' + name elif where != 'local': name = where + name.split('.')[-1] dump.append('%s = %s' % (name, pydoc.text.repr(value))) else: dump.append(name + ' undefined') rows.append('\n'.join(dump)) frames.append('\n%s\n' % '\n'.join(rows)) exception = ['%s: %s' % (str(etype), str(evalue))] for name in dir(evalue): value = pydoc.text.repr(getattr(evalue, name)) exception.append('\n%s%s = %s' % (" "*4, name, value)) return head + ''.join(frames) + ''.join(exception) + ''' The above is a description 
of an error in a Python program. Here is the original traceback: %s ''' % ''.join(traceback.format_exception(etype, evalue, etb)) class Hook: """A hook to replace sys.excepthook that shows tracebacks in HTML.""" def __init__(self, display=1, logdir=None, context=5, file=None, format="html"): self.display = display # send tracebacks to browser if true self.logdir = logdir # log tracebacks to files if not None self.context = context # number of source code lines per frame self.file = file or sys.stdout # place to send the output self.format = format def __call__(self, etype, evalue, etb): self.handle((etype, evalue, etb)) def handle(self, info=None): info = info or sys.exc_info() if self.format == "html": self.file.write(reset()) formatter = (self.format=="html") and html or text plain = False try: doc = formatter(info, self.context) except: # just in case something goes wrong doc = ''.join(traceback.format_exception(*info)) plain = True if self.display: if plain: doc = pydoc.html.escape(doc) self.file.write('

' + doc + '
\n') else: self.file.write(doc + '\n') else: self.file.write('

A problem occurred in a Python script.\n') if self.logdir is not None: suffix = ['.txt', '.html'][self.format=="html"] (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) try: with os.fdopen(fd, 'w') as file: file.write(doc) msg = '%s contains the description of this error.' % path except: msg = 'Tried to save traceback to %s, but failed.' % path if self.format == 'html': self.file.write('

%s

\n' % msg) else: self.file.write(msg + '\n') try: self.file.flush() except: pass handler = Hook().handle def enable(display=1, logdir=None, context=5, format="html"): """Install an exception handler that formats tracebacks as HTML. The optional argument 'display' can be set to 0 to suppress sending the traceback to the browser, and 'logdir' can be set to a directory to cause tracebacks to be written to files there.""" sys.excepthook = Hook(display=display, logdir=logdir, context=context, format=format) ================================================ FILE: rd/usr/lib/python3.8/chunk.py ================================================ """Simple class to read IFF chunks. An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File Format)) has the following structure: +----------------+ | ID (4 bytes) | +----------------+ | size (4 bytes) | +----------------+ | data | | ... | +----------------+ The ID is a 4-byte string which identifies the type of chunk. The size field (a 32-bit value, encoded using big-endian byte order) gives the size of the whole chunk, including the 8-byte header. Usually an IFF-type file consists of one or more chunks. The proposed usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end of the file, creating a new instance will fail with an EOFError exception. Usage: while True: try: chunk = Chunk(file) except EOFError: break chunktype = chunk.getname() while True: data = chunk.read(nbytes) if not data: pass # do something with data The interface is file-like. The implemented methods are: read, close, seek, tell, isatty. 
Extra methods are: skip() (called by close, skips to the end of the
chunk), getname() (returns the name (ID) of the chunk)

The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries.  The
default is 1, i.e. aligned.
"""

class Chunk:
    def __init__(self, file, align=True, bigendian=True, inclheader=False):
        # file:       file-like object positioned at the start of a chunk header
        # align:      pad chunks to 2-byte (word) boundaries when reading/skipping
        # bigendian:  byte order of the 32-bit size field (IFF/AIFF are
        #             big-endian; RIFF-style files are little-endian)
        # inclheader: if True, the stored size includes the 8-byte header
        import struct
        self.closed = False
        self.align = align      # whether to align to word (2-byte) boundaries
        if bigendian:
            strflag = '>'
        else:
            strflag = '<'
        self.file = file
        self.chunkname = file.read(4)
        if len(self.chunkname) < 4:
            # Short read on the 4-byte ID: no further chunk in the stream.
            raise EOFError
        try:
            self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0]
        except struct.error:
            # Truncated size field; report a clean end-of-file instead.
            raise EOFError from None
        if inclheader:
            self.chunksize = self.chunksize - 8 # subtract header
        self.size_read = 0          # bytes of chunk data consumed so far
        # Random access within the chunk is only possible when the
        # underlying file supports tell()/seek().
        try:
            self.offset = self.file.tell()
        except (AttributeError, OSError):
            self.seekable = False
        else:
            self.seekable = True

    def getname(self):
        """Return the name (ID) of the current chunk."""
        return self.chunkname

    def getsize(self):
        """Return the size of the current chunk."""
        return self.chunksize

    def close(self):
        # Skip any unread remainder so the file ends up positioned at the
        # start of the next chunk; mark closed even if the skip fails.
        if not self.closed:
            try:
                self.skip()
            finally:
                self.closed = True

    def isatty(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return False

    def seek(self, pos, whence=0):
        """Seek to specified position into the chunk.
        Default position is 0 (start of chunk).
        If the file is not seekable, this will result in an error.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if not self.seekable:
            raise OSError("cannot seek")
        # Translate whence-relative positions into a chunk-relative offset:
        # 1 = relative to current position, 2 = relative to chunk end.
        if whence == 1:
            pos = pos + self.size_read
        elif whence == 2:
            pos = pos + self.chunksize
        if pos < 0 or pos > self.chunksize:
            # Target lies outside the chunk's bounds.
            raise RuntimeError
        self.file.seek(self.offset + pos, 0)
        self.size_read = pos

    def tell(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.size_read

    def read(self, size=-1):
        """Read at most size bytes from the chunk.
        If size is omitted or negative, read until the end
        of the chunk.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.size_read >= self.chunksize:
            # Chunk exhausted: behave like EOF.
            return b''
        if size < 0:
            size = self.chunksize - self.size_read
        if size > self.chunksize - self.size_read:
            # Clamp so we never read past the end of this chunk.
            size = self.chunksize - self.size_read
        data = self.file.read(size)
        self.size_read = self.size_read + len(data)
        # Once an odd-sized chunk is fully consumed, also consume its
        # alignment pad byte so the file points at the next chunk header.
        if self.size_read == self.chunksize and \
           self.align and \
           (self.chunksize & 1):
            dummy = self.file.read(1)
            self.size_read = self.size_read + len(dummy)
        return data

    def skip(self):
        """Skip the rest of the chunk.
        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.seekable:
            # Fast path: seek over the remainder (plus pad byte) in one go.
            try:
                n = self.chunksize - self.size_read
                # maybe fix alignment
                if self.align and (self.chunksize & 1):
                    n = n + 1
                self.file.seek(n, 1)
                self.size_read = self.size_read + n
                return
            except OSError:
                pass
        # Slow path (unseekable stream): read and discard in 8 KiB pieces.
        while self.size_read < self.chunksize:
            n = min(8192, self.chunksize - self.size_read)
            dummy = self.read(n)
            if not dummy:
                raise EOFError


================================================
FILE: rd/usr/lib/python3.8/cmd.py
================================================
"""A generic class to build line-oriented command interpreters.

Interpreters constructed with this class obey the following conventions:

1. End of file on input is processed as the command 'EOF'.
2.
A command is parsed out of each line by collecting the prefix composed
   of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
   is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command.  (Actually, it calls the
   method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method.  Given an argument `topic', it
   calls the command `help_topic'.  With no arguments, it lists all topics
   with defined help_ functions, broken into up to three topics; documented
   commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'.  The command '!' is a synonym
   for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
   and completing of commands args is done by calling complete_foo() with
   arguments text, line, begidx, endidx.  text is string we are matching
   against, all returned matches must begin with it.  line is the current
   input line (lstripped), begidx and endidx are the beginning and end
   indexes of the text being matched, which could be used to provide
   different completion depending upon which position the argument is in.

The `default' method may be overridden to intercept commands for which there
is no do_ method.

The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.

The data member `self.ruler' sets the character used to draw separator lines
in the help messages.  If empty, no ruler line is drawn.  It defaults to "=".

If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup.  This value may be overridden
via an optional argument to the cmdloop() method.

The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""

import string, sys

__all__ = ["Cmd"]

PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'

class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.

    """
    # Class-level defaults; subclasses commonly override these.
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    # NOTE(review): upstream cmd.py reads "Documented commands (type help <topic>):";
    # the "<topic>" token appears to have been lost in extraction -- confirm
    # against the packaged ramdisk before relying on this literal.
    doc_header = "Documented commands (type help ):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.

        """
        # Fall back to the process-wide streams when none are supplied.
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []          # pending commands run before prompting
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
""" self.preloop() if self.use_rawinput and self.completekey: try: import readline self.old_completer = readline.get_completer() readline.set_completer(self.complete) readline.parse_and_bind(self.completekey+": complete") except ImportError: pass try: if intro is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+"\n") stop = None while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0) else: if self.use_rawinput: try: line = input(self.prompt) except EOFError: line = 'EOF' else: self.stdout.write(self.prompt) self.stdout.flush() line = self.stdin.readline() if not len(line): line = 'EOF' else: line = line.rstrip('\r\n') line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally: if self.use_rawinput and self.completekey: try: import readline readline.set_completer(self.old_completer) except ImportError: pass def precmd(self, line): """Hook method executed just before the command line is interpreted, but after the input prompt is generated and issued. """ return line def postcmd(self, stop, line): """Hook method executed just after a command dispatch is finished.""" return stop def preloop(self): """Hook method executed once when the cmdloop() method is called.""" pass def postloop(self): """Hook method executed once when the cmdloop() method is about to return. """ pass def parseline(self, line): """Parse the line into a command name and a string containing the arguments. Returns a tuple containing (command, args, line). 'command' and 'args' may be None if the line couldn't be parsed. 
""" line = line.strip() if not line: return None, None, line elif line[0] == '?': line = 'help ' + line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'): line = 'shell ' + line[1:] else: return None, None, line i, n = 0, len(line) while i < n and line[i] in self.identchars: i = i+1 cmd, arg = line[:i], line[i:].strip() return cmd, arg, line def onecmd(self, line): """Interpret the argument as though it had been typed in response to the prompt. This may be overridden, but should not normally need to be; see the precmd() and postcmd() methods for useful execution hooks. The return value is a flag indicating whether interpretation of commands by the interpreter should stop. """ cmd, arg, line = self.parseline(line) if not line: return self.emptyline() if cmd is None: return self.default(line) self.lastcmd = line if line == 'EOF' : self.lastcmd = '' if cmd == '': return self.default(line) else: try: func = getattr(self, 'do_' + cmd) except AttributeError: return self.default(line) return func(arg) def emptyline(self): """Called when an empty line is entered in response to the prompt. If this method is not overridden, it repeats the last nonempty command entered. """ if self.lastcmd: return self.onecmd(self.lastcmd) def default(self, line): """Called on an input line when the command prefix is not recognized. If this method is not overridden, it prints an error message and returns. """ self.stdout.write('*** Unknown syntax: %s\n'%line) def completedefault(self, *ignored): """Method called to complete an input line when no command-specific complete_*() method is available. By default, it returns an empty list. """ return [] def completenames(self, text, *ignored): dotext = 'do_'+text return [a[3:] for a in self.get_names() if a.startswith(dotext)] def complete(self, text, state): """Return the next possible completion for 'text'. If a command has not been entered, then complete against command list. Otherwise try to call complete_ to get list of completions. 
""" if state == 0: import readline origline = readline.get_line_buffer() line = origline.lstrip() stripped = len(origline) - len(line) begidx = readline.get_begidx() - stripped endidx = readline.get_endidx() - stripped if begidx>0: cmd, args, foo = self.parseline(line) if cmd == '': compfunc = self.completedefault else: try: compfunc = getattr(self, 'complete_' + cmd) except AttributeError: compfunc = self.completedefault else: compfunc = self.completenames self.completion_matches = compfunc(text, line, begidx, endidx) try: return self.completion_matches[state] except IndexError: return None def get_names(self): # This method used to pull in base class attributes # at a time dir() didn't do it yet. return dir(self.__class__) def complete_help(self, *args): commands = set(self.completenames(*args)) topics = set(a[5:] for a in self.get_names() if a.startswith('help_' + args[0])) return list(commands | topics) def do_help(self, arg): 'List available commands with "help" or detailed help with "help cmd".' 
if arg: # XXX check arg syntax try: func = getattr(self, 'help_' + arg) except AttributeError: try: doc=getattr(self, 'do_' + arg).__doc__ if doc: self.stdout.write("%s\n"%str(doc)) return except AttributeError: pass self.stdout.write("%s\n"%str(self.nohelp % (arg,))) return func() else: names = self.get_names() cmds_doc = [] cmds_undoc = [] help = {} for name in names: if name[:5] == 'help_': help[name[5:]]=1 names.sort() # There can be duplicates if routines overridden prevname = '' for name in names: if name[:3] == 'do_': if name == prevname: continue prevname = name cmd=name[3:] if cmd in help: cmds_doc.append(cmd) del help[cmd] elif getattr(self, name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write("%s\n"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80) def print_topics(self, header, cmds, cmdlen, maxcol): if cmds: self.stdout.write("%s\n"%str(header)) if self.ruler: self.stdout.write("%s\n"%str(self.ruler * len(header))) self.columnize(cmds, maxcol-1) self.stdout.write("\n") def columnize(self, list, displaywidth=80): """Display a list of strings as a compact set of columns. Each column is only as wide as necessary. Columns are separated by two spaces (one was not legible enough). 
""" if not list: self.stdout.write("\n") return nonstrings = [i for i in range(len(list)) if not isinstance(list[i], str)] if nonstrings: raise TypeError("list[i] not a string for i in %s" % ", ".join(map(str, nonstrings))) size = len(list) if size == 1: self.stdout.write('%s\n'%str(list[0])) return # Try every row count from 1 upwards for nrows in range(1, len(list)): ncols = (size+nrows-1) // nrows colwidths = [] totwidth = -2 for col in range(ncols): colwidth = 0 for row in range(nrows): i = row + nrows*col if i >= size: break x = list[i] colwidth = max(colwidth, len(x)) colwidths.append(colwidth) totwidth += colwidth + 2 if totwidth > displaywidth: break if totwidth <= displaywidth: break else: nrows = len(list) ncols = 1 colwidths = [0] for row in range(nrows): texts = [] for col in range(ncols): i = row + nrows*col if i >= size: x = "" else: x = list[i] texts.append(x) while texts and not texts[-1]: del texts[-1] for col in range(len(texts)): texts[col] = texts[col].ljust(colwidths[col]) self.stdout.write("%s\n"%str(" ".join(texts))) ================================================ FILE: rd/usr/lib/python3.8/code.py ================================================ """Utilities needed to emulate Python's interactive interpreter. """ # Inspired by similar code by Jeff Epler and Fredrik Lundh. import sys import traceback from codeop import CommandCompiler, compile_command __all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", "compile_command"] class InteractiveInterpreter: """Base class for InteractiveConsole. This class deals with parsing and interpreter state (the user's namespace); it doesn't deal with input buffering or prompting or input file naming (the filename is always passed in explicitly). """ def __init__(self, locals=None): """Constructor. 
The optional 'locals' argument specifies the dictionary in which code will be executed; it defaults to a newly created dictionary with key "__name__" set to "__console__" and key "__doc__" set to None. """ if locals is None: locals = {"__name__": "__console__", "__doc__": None} self.locals = locals self.compile = CommandCompiler() def runsource(self, source, filename="", symbol="single"): """Compile and run some source in the interpreter. Arguments are as for compile_command(). One of several things can happen: 1) The input is incorrect; compile_command() raised an exception (SyntaxError or OverflowError). A syntax traceback will be printed by calling the showsyntaxerror() method. 2) The input is incomplete, and more input is required; compile_command() returned None. Nothing happens. 3) The input is complete; compile_command() returned a code object. The code is executed by calling self.runcode() (which also handles run-time exceptions, except for SystemExit). The return value is True in case 2, False in the other cases (unless an exception is raised). The return value can be used to decide whether to use sys.ps1 or sys.ps2 to prompt the next line. """ try: code = self.compile(source, filename, symbol) except (OverflowError, SyntaxError, ValueError): # Case 1 self.showsyntaxerror(filename) return False if code is None: # Case 2 return True # Case 3 self.runcode(code) return False def runcode(self, code): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it. """ try: exec(code, self.locals) except SystemExit: raise except: self.showtraceback() def showsyntaxerror(self, filename=None): """Display the syntax error that just occurred. This doesn't display a stack trace because there isn't one. 
If a filename is given, it is stuffed in the exception instead of what was there before (because Python's parser always uses "" when reading from a string). The output is written by self.write(), below. """ type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: msg, (dummy_filename, lineno, offset, line) = value.args except ValueError: # Not the format we expect; leave it alone pass else: # Stuff in the right filename value = SyntaxError(msg, (filename, lineno, offset, line)) sys.last_value = value if sys.excepthook is sys.__excepthook__: lines = traceback.format_exception_only(type, value) self.write(''.join(lines)) else: # If someone has set sys.excepthook, we let that take precedence # over self.write sys.excepthook(type, value, tb) def showtraceback(self): """Display the exception that just occurred. We remove the first stack item because it is our own code. The output is written by self.write(), below. """ sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() sys.last_traceback = last_tb try: lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next) if sys.excepthook is sys.__excepthook__: self.write(''.join(lines)) else: # If someone has set sys.excepthook, we let that take precedence # over self.write sys.excepthook(ei[0], ei[1], last_tb) finally: last_tb = ei = None def write(self, data): """Write a string. The base implementation writes to sys.stderr; a subclass may replace this with a different implementation. """ sys.stderr.write(data) class InteractiveConsole(InteractiveInterpreter): """Closely emulate the behavior of the interactive Python interpreter. This class builds on InteractiveInterpreter and adds prompting using the familiar sys.ps1 and sys.ps2, and input buffering. """ def __init__(self, locals=None, filename=""): """Constructor. 
The optional locals argument will be passed to the InteractiveInterpreter base class. The optional filename argument should specify the (file)name of the input stream; it will show up in tracebacks. """ InteractiveInterpreter.__init__(self, locals) self.filename = filename self.resetbuffer() def resetbuffer(self): """Reset the input buffer.""" self.buffer = [] def interact(self, banner=None, exitmsg=None): """Closely emulate the interactive Python console. The optional banner argument specifies the banner to print before the first interaction; by default it prints a banner similar to the one printed by the real Python interpreter, followed by the current class name in parentheses (so as not to confuse this with the real interpreter -- since it's so close!). The optional exitmsg argument specifies the exit message printed when exiting. Pass the empty string to suppress printing an exit message. If exitmsg is not given or None, a default message is printed. """ try: sys.ps1 except AttributeError: sys.ps1 = ">>> " try: sys.ps2 except AttributeError: sys.ps2 = "... " cprt = 'Type "help", "copyright", "credits" or "license" for more information.' if banner is None: self.write("Python %s on %s\n%s\n(%s)\n" % (sys.version, sys.platform, cprt, self.__class__.__name__)) elif banner: self.write("%s\n" % str(banner)) more = 0 while 1: try: if more: prompt = sys.ps2 else: prompt = sys.ps1 try: line = self.raw_input(prompt) except EOFError: self.write("\n") break else: more = self.push(line) except KeyboardInterrupt: self.write("\nKeyboardInterrupt\n") self.resetbuffer() more = 0 if exitmsg is None: self.write('now exiting %s...\n' % self.__class__.__name__) elif exitmsg != '': self.write('%s\n' % exitmsg) def push(self, line): """Push a line to the interpreter. The line should not have a trailing newline; it may have internal newlines. The line is appended to a buffer and the interpreter's runsource() method is called with the concatenated contents of the buffer as source. 
If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is 1 if more input is required, 0 if the line was dealt with in some way (this is the same as runsource()). """ self.buffer.append(line) source = "\n".join(self.buffer) more = self.runsource(source, self.filename) if not more: self.resetbuffer() return more def raw_input(self, prompt=""): """Write a prompt and read a line. The returned line does not include the trailing newline. When the user enters the EOF key sequence, EOFError is raised. The base implementation uses the built-in function input(); a subclass may replace this with a different implementation. """ return input(prompt) def interact(banner=None, readfunc=None, local=None, exitmsg=None): """Closely emulate the interactive Python interpreter. This is a backwards compatible interface to the InteractiveConsole class. When readfunc is not specified, it attempts to import the readline module to enable GNU readline if it is available. 
Arguments (all optional, all default to None): banner -- passed to InteractiveConsole.interact() readfunc -- if not None, replaces InteractiveConsole.raw_input() local -- passed to InteractiveInterpreter.__init__() exitmsg -- passed to InteractiveConsole.interact() """ console = InteractiveConsole(local) if readfunc is not None: console.raw_input = readfunc else: try: import readline except ImportError: pass console.interact(banner, exitmsg) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('-q', action='store_true', help="don't print version and copyright messages") args = parser.parse_args() if args.q or sys.flags.quiet: banner = '' else: banner = None interact(banner) ================================================ FILE: rd/usr/lib/python3.8/codecs.py ================================================ """ codecs -- Python Codec Registry, API and helpers. Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. 
""" import builtins import sys ### Registry and builtin stateless codec functions try: from _codecs import * except ImportError as why: raise SystemError('Failed to load the builtin codecs: %s' % why) __all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", "StreamReader", "StreamWriter", "StreamReaderWriter", "StreamRecoder", "getencoder", "getdecoder", "getincrementalencoder", "getincrementaldecoder", "getreader", "getwriter", "encode", "decode", "iterencode", "iterdecode", "strict_errors", "ignore_errors", "replace_errors", "xmlcharrefreplace_errors", "backslashreplace_errors", "namereplace_errors", "register_error", "lookup_error"] ### Constants # # Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) # and its possible byte string values # for UTF8/UTF16/UTF32 output and little/big endian machines # # UTF-8 BOM_UTF8 = b'\xef\xbb\xbf' # UTF-16, little endian BOM_LE = BOM_UTF16_LE = b'\xff\xfe' # UTF-16, big endian BOM_BE = BOM_UTF16_BE = b'\xfe\xff' # UTF-32, little endian BOM_UTF32_LE = b'\xff\xfe\x00\x00' # UTF-32, big endian BOM_UTF32_BE = b'\x00\x00\xfe\xff' if sys.byteorder == 'little': # UTF-16, native endianness BOM = BOM_UTF16 = BOM_UTF16_LE # UTF-32, native endianness BOM_UTF32 = BOM_UTF32_LE else: # UTF-16, native endianness BOM = BOM_UTF16 = BOM_UTF16_BE # UTF-32, native endianness BOM_UTF32 = BOM_UTF32_BE # Old broken names (don't use in new code) BOM32_LE = BOM_UTF16_LE BOM32_BE = BOM_UTF16_BE BOM64_LE = BOM_UTF32_LE BOM64_BE = BOM_UTF32_BE ### Codec base classes (defining the API) class CodecInfo(tuple): """Codec details when looking up the codec registry""" # Private API to allow Python 3.4 to blacklist the known non-Unicode # codecs in the standard library. 
A more general mechanism to # reliably distinguish test encodings from other codecs will hopefully # be defined for Python 3.5 # # See http://bugs.python.org/issue19619 _is_text_encoding = True # Assume codecs are text encodings by default def __new__(cls, encode, decode, streamreader=None, streamwriter=None, incrementalencoder=None, incrementaldecoder=None, name=None, *, _is_text_encoding=None): self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) self.name = name self.encode = encode self.decode = decode self.incrementalencoder = incrementalencoder self.incrementaldecoder = incrementaldecoder self.streamwriter = streamwriter self.streamreader = streamreader if _is_text_encoding is not None: self._is_text_encoding = _is_text_encoding return self def __repr__(self): return "<%s.%s object for encoding %s at %#x>" % \ (self.__class__.__module__, self.__class__.__qualname__, self.name, id(self)) class Codec: """ Defines the interface for stateless encoders/decoders. The .encode()/.decode() methods may use different error handling schemes by providing the errors argument. These string values are predefined: 'strict' - raise a ValueError error (or a subclass) 'ignore' - ignore the character and continue with the next 'replace' - replace with a suitable replacement character; Python will use the official U+FFFD REPLACEMENT CHARACTER for the builtin Unicode codecs on decoding and '?' on encoding. 'surrogateescape' - replace with private code points U+DCnn. 'xmlcharrefreplace' - Replace with the appropriate XML character reference (only for encoding). 'backslashreplace' - Replace with backslashed escape sequences. 'namereplace' - Replace with \\N{...} escape sequences (only for encoding). The set of allowed values can be extended via register_error. """ def encode(self, input, errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling. 
The method may not store state in the Codec instance. Use StreamWriter for codecs which have to keep state in order to make encoding efficient. The encoder must be able to handle zero length input and return an empty object of the output object type in this situation. """ raise NotImplementedError def decode(self, input, errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling. The method may not store state in the Codec instance. Use StreamReader for codecs which have to keep state in order to make decoding efficient. The decoder must be able to handle zero length input and return an empty object of the output object type in this situation. """ raise NotImplementedError class IncrementalEncoder(object): """ An IncrementalEncoder encodes an input in multiple steps. The input can be passed piece by piece to the encode() method. The IncrementalEncoder remembers the state of the encoding process between calls to encode(). """ def __init__(self, errors='strict'): """ Creates an IncrementalEncoder instance. The IncrementalEncoder may use different error handling schemes by providing the errors keyword argument. See the module docstring for a list of possible values. """ self.errors = errors self.buffer = "" def encode(self, input, final=False): """ Encodes input and returns the resulting object. """ raise NotImplementedError def reset(self): """ Resets the encoder to the initial state. """ def getstate(self): """ Return the current state of the encoder. """ return 0 def setstate(self, state): """ Set the current state of the encoder. state must have been returned by getstate(). 
""" class BufferedIncrementalEncoder(IncrementalEncoder): """ This subclass of IncrementalEncoder can be used as the baseclass for an incremental encoder if the encoder must keep some of the output in a buffer between calls to encode(). """ def __init__(self, errors='strict'): IncrementalEncoder.__init__(self, errors) # unencoded input that is kept between calls to encode() self.buffer = "" def _buffer_encode(self, input, errors, final): # Overwrite this method in subclasses: It must encode input # and return an (output, length consumed) tuple raise NotImplementedError def encode(self, input, final=False): # encode input (taking the buffer into account) data = self.buffer + input (result, consumed) = self._buffer_encode(data, self.errors, final) # keep unencoded input until the next call self.buffer = data[consumed:] return result def reset(self): IncrementalEncoder.reset(self) self.buffer = "" def getstate(self): return self.buffer or 0 def setstate(self, state): self.buffer = state or "" class IncrementalDecoder(object): """ An IncrementalDecoder decodes an input in multiple steps. The input can be passed piece by piece to the decode() method. The IncrementalDecoder remembers the state of the decoding process between calls to decode(). """ def __init__(self, errors='strict'): """ Create an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring for a list of possible values. """ self.errors = errors def decode(self, input, final=False): """ Decode input and returns the resulting object. """ raise NotImplementedError def reset(self): """ Reset the decoder to the initial state. """ def getstate(self): """ Return the current state of the decoder. This must be a (buffered_input, additional_state_info) tuple. buffered_input must be a bytes object containing bytes that were passed to decode() that have not yet been converted. 
additional_state_info must be a non-negative integer representing the state of the decoder WITHOUT yet having processed the contents of buffered_input. In the initial state and after reset(), getstate() must return (b"", 0). """ return (b"", 0) def setstate(self, state): """ Set the current state of the decoder. state must have been returned by getstate(). The effect of setstate((b"", 0)) must be equivalent to reset(). """ class BufferedIncrementalDecoder(IncrementalDecoder): """ This subclass of IncrementalDecoder can be used as the baseclass for an incremental decoder if the decoder must be able to handle incomplete byte sequences. """ def __init__(self, errors='strict'): IncrementalDecoder.__init__(self, errors) # undecoded input that is kept between calls to decode() self.buffer = b"" def _buffer_decode(self, input, errors, final): # Overwrite this method in subclasses: It must decode input # and return an (output, length consumed) tuple raise NotImplementedError def decode(self, input, final=False): # decode input (taking the buffer into account) data = self.buffer + input (result, consumed) = self._buffer_decode(data, self.errors, final) # keep undecoded input until the next call self.buffer = data[consumed:] return result def reset(self): IncrementalDecoder.reset(self) self.buffer = b"" def getstate(self): # additional state info is always 0 return (self.buffer, 0) def setstate(self, state): # ignore additional state info self.buffer = state[0] # # The StreamWriter and StreamReader class provide generic working # interfaces which can be used to implement new encoding submodules # very easily. See encodings/utf_8.py for an example on how this is # done. # class StreamWriter(Codec): def __init__(self, stream, errors='strict'): """ Creates a StreamWriter instance. stream must be a file-like object open for writing. The StreamWriter may use different error handling schemes by providing the errors keyword argument. 
These parameters are predefined: 'strict' - raise a ValueError (or a subclass) 'ignore' - ignore the character and continue with the next 'replace'- replace with a suitable replacement character 'xmlcharrefreplace' - Replace with the appropriate XML character reference. 'backslashreplace' - Replace with backslashed escape sequences. 'namereplace' - Replace with \\N{...} escape sequences. The set of allowed parameter values can be extended via register_error. """ self.stream = stream self.errors = errors def write(self, object): """ Writes the object's contents encoded to self.stream. """ data, consumed = self.encode(object, self.errors) self.stream.write(data) def writelines(self, list): """ Writes the concatenated list of strings to the stream using .write(). """ self.write(''.join(list)) def reset(self): """ Flushes and resets the codec buffers used for keeping state. Calling this method should ensure that the data on the output is put into a clean state, that allows appending of new fresh data without having to rescan the whole stream to recover state. """ pass def seek(self, offset, whence=0): self.stream.seek(offset, whence) if whence == 0 and offset == 0: self.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamReader(Codec): charbuffertype = str def __init__(self, stream, errors='strict'): """ Creates a StreamReader instance. stream must be a file-like object open for reading. The StreamReader may use different error handling schemes by providing the errors keyword argument. 
These parameters are predefined: 'strict' - raise a ValueError (or a subclass) 'ignore' - ignore the character and continue with the next 'replace'- replace with a suitable replacement character 'backslashreplace' - Replace with backslashed escape sequences; The set of allowed parameter values can be extended via register_error. """ self.stream = stream self.errors = errors self.bytebuffer = b"" self._empty_charbuffer = self.charbuffertype() self.charbuffer = self._empty_charbuffer self.linebuffer = None def decode(self, input, errors='strict'): raise NotImplementedError def read(self, size=-1, chars=-1, firstline=False): """ Decodes data from the stream self.stream and returns the resulting object. chars indicates the number of decoded code points or bytes to return. read() will never return more data than requested, but it might return less, if there is not enough available. size indicates the approximate maximum number of decoded bytes or code points to read for decoding. The decoder can modify this setting as appropriate. The default value -1 indicates to read and decode as much as possible. size is intended to prevent having to decode huge files in one step. If firstline is true, and a UnicodeDecodeError happens after the first line terminator in the input only the first line will be returned, the rest of the input will be kept until the next call to read(). The method should use a greedy read strategy, meaning that it should read as much data as is allowed within the definition of the encoding and the given size, e.g. if optional encoding endings or state markers are available on the stream, these should be read too. 
""" # If we have lines cached, first merge them back into characters if self.linebuffer: self.charbuffer = self._empty_charbuffer.join(self.linebuffer) self.linebuffer = None if chars < 0: # For compatibility with other read() methods that take a # single argument chars = size # read until we get the required number of characters (if available) while True: # can the request be satisfied from the character buffer? if chars >= 0: if len(self.charbuffer) >= chars: break # we need more data if size < 0: newdata = self.stream.read() else: newdata = self.stream.read(size) # decode bytes (those remaining from the last call included) data = self.bytebuffer + newdata if not data: break try: newchars, decodedbytes = self.decode(data, self.errors) except UnicodeDecodeError as exc: if firstline: newchars, decodedbytes = \ self.decode(data[:exc.start], self.errors) lines = newchars.splitlines(keepends=True) if len(lines)<=1: raise else: raise # keep undecoded bytes until the next call self.bytebuffer = data[decodedbytes:] # put new characters in the character buffer self.charbuffer += newchars # there was no data available if not newdata: break if chars < 0: # Return everything we've got result = self.charbuffer self.charbuffer = self._empty_charbuffer else: # Return the first chars characters result = self.charbuffer[:chars] self.charbuffer = self.charbuffer[chars:] return result def readline(self, size=None, keepends=True): """ Read one line from the input stream and return the decoded data. size, if given, is passed as size argument to the read() method. 
""" # If we have lines cached from an earlier read, return # them unconditionally if self.linebuffer: line = self.linebuffer[0] del self.linebuffer[0] if len(self.linebuffer) == 1: # revert to charbuffer mode; we might need more data # next time self.charbuffer = self.linebuffer[0] self.linebuffer = None if not keepends: line = line.splitlines(keepends=False)[0] return line readsize = size or 72 line = self._empty_charbuffer # If size is given, we call read() only once while True: data = self.read(readsize, firstline=True) if data: # If we're at a "\r" read one extra character (which might # be a "\n") to get a proper line ending. If the stream is # temporarily exhausted we return the wrong line ending. if (isinstance(data, str) and data.endswith("\r")) or \ (isinstance(data, bytes) and data.endswith(b"\r")): data += self.read(size=1, chars=1) line += data lines = line.splitlines(keepends=True) if lines: if len(lines) > 1: # More than one line result; the first line is a full line # to return line = lines[0] del lines[0] if len(lines) > 1: # cache the remaining lines lines[-1] += self.charbuffer self.linebuffer = lines self.charbuffer = None else: # only one remaining line, put it back into charbuffer self.charbuffer = lines[0] + self.charbuffer if not keepends: line = line.splitlines(keepends=False)[0] break line0withend = lines[0] line0withoutend = lines[0].splitlines(keepends=False)[0] if line0withend != line0withoutend: # We really have a line end # Put the rest back together and keep it until the next call self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ self.charbuffer if keepends: line = line0withend else: line = line0withoutend break # we didn't get anything or this was our only try if not data or size is not None: if line and not keepends: line = line.splitlines(keepends=False)[0] break if readsize < 8000: readsize *= 2 return line def readlines(self, sizehint=None, keepends=True): """ Read all lines available on the input stream and return 
them as a list. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line. """ data = self.read() return data.splitlines(keepends) def reset(self): """ Resets the codec buffers used for keeping state. Note that no stream repositioning should take place. This method is primarily intended to be able to recover from decoding errors. """ self.bytebuffer = b"" self.charbuffer = self._empty_charbuffer self.linebuffer = None def seek(self, offset, whence=0): """ Set the input stream's current position. Resets the codec buffers used for keeping state. """ self.stream.seek(offset, whence) self.reset() def __next__(self): """ Return the next decoded line from the input stream.""" line = self.readline() if line: return line raise StopIteration def __iter__(self): return self def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamReaderWriter: """ StreamReaderWriter instances allow wrapping streams which work in both read and write modes. The design is such that one can use the factory functions returned by the codec.lookup() function to construct the instance. """ # Optional attributes set by the file wrappers below encoding = 'unknown' def __init__(self, stream, Reader, Writer, errors='strict'): """ Creates a StreamReaderWriter instance. stream must be a Stream-like object. Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. Error handling is done in the same way as defined for the StreamWriter/Readers. 
""" self.stream = stream self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors def read(self, size=-1): return self.reader.read(size) def readline(self, size=None): return self.reader.readline(size) def readlines(self, sizehint=None): return self.reader.readlines(sizehint) def __next__(self): """ Return the next decoded line from the input stream.""" return next(self.reader) def __iter__(self): return self def write(self, data): return self.writer.write(data) def writelines(self, list): return self.writer.writelines(list) def reset(self): self.reader.reset() self.writer.reset() def seek(self, offset, whence=0): self.stream.seek(offset, whence) self.reader.reset() if whence == 0 and offset == 0: self.writer.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) # these are needed to make "with StreamReaderWriter(...)" work properly def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamRecoder: """ StreamRecoder instances translate data from one encoding to another. They use the complete set of APIs returned by the codecs.lookup() function to implement their task. Data written to the StreamRecoder is first decoded into an intermediate format (depending on the "decode" codec) and then written to the underlying stream using an instance of the provided Writer class. In the other direction, data is read from the underlying stream using a Reader instance and then encoded and returned to the caller. 
""" # Optional attributes set by the file wrappers below data_encoding = 'unknown' file_encoding = 'unknown' def __init__(self, stream, encode, decode, Reader, Writer, errors='strict'): """ Creates a StreamRecoder instance which implements a two-way conversion: encode and decode work on the frontend (the data visible to .read() and .write()) while Reader and Writer work on the backend (the data in stream). You can use these objects to do transparent transcodings from e.g. latin-1 to utf-8 and back. stream must be a file-like object. encode and decode must adhere to the Codec interface; Reader and Writer must be factory functions or classes providing the StreamReader and StreamWriter interfaces resp. Error handling is done in the same way as defined for the StreamWriter/Readers. """ self.stream = stream self.encode = encode self.decode = decode self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors def read(self, size=-1): data = self.reader.read(size) data, bytesencoded = self.encode(data, self.errors) return data def readline(self, size=None): if size is None: data = self.reader.readline() else: data = self.reader.readline(size) data, bytesencoded = self.encode(data, self.errors) return data def readlines(self, sizehint=None): data = self.reader.read() data, bytesencoded = self.encode(data, self.errors) return data.splitlines(keepends=True) def __next__(self): """ Return the next decoded line from the input stream.""" data = next(self.reader) data, bytesencoded = self.encode(data, self.errors) return data def __iter__(self): return self def write(self, data): data, bytesdecoded = self.decode(data, self.errors) return self.writer.write(data) def writelines(self, list): data = b''.join(list) data, bytesdecoded = self.decode(data, self.errors) return self.writer.write(data) def reset(self): self.reader.reset() self.writer.reset() def seek(self, offset, whence=0): # Seeks must be propagated to both the readers and writers # as 
they might need to reset their internal buffers. self.reader.seek(offset, whence) self.writer.seek(offset, whence) def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### Shortcuts def open(filename, mode='r', encoding=None, errors='strict', buffering=-1): """ Open an encoded file using the given mode and return a wrapped version providing transparent encoding/decoding. Note: The wrapped version will only accept the object format defined by the codecs, i.e. Unicode objects for most builtin codecs. Output is also codec dependent and will usually be Unicode as well. Underlying encoded files are always opened in binary mode. The default file mode is 'r', meaning to open the file in read mode. encoding specifies the encoding which is to be used for the file. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. buffering has the same meaning as for the builtin open() API. It defaults to -1 which means that the default buffer size will be used. The returned wrapped file object provides an extra attribute .encoding which allows querying the used encoding. This attribute is only available if an encoding was specified as parameter. """ if encoding is not None and \ 'b' not in mode: # Force opening of the file in binary mode mode = mode + 'b' file = builtins.open(filename, mode, buffering) if encoding is None: return file try: info = lookup(encoding) srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) # Add attributes to simplify introspection srw.encoding = encoding return srw except: file.close() raise def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): """ Return a wrapped version of file which provides transparent encoding translation. 
Data written to the wrapped file is decoded according to the given data_encoding and then encoded to the underlying file using file_encoding. The intermediate data type will usually be Unicode but depends on the specified codecs. Bytes read from the file are decoded using file_encoding and then passed back to the caller encoded using data_encoding. If file_encoding is not given, it defaults to data_encoding. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. The returned wrapped file object provides two extra attributes .data_encoding and .file_encoding which reflect the given parameters of the same name. The attributes can be used for introspection by Python programs. """ if file_encoding is None: file_encoding = data_encoding data_info = lookup(data_encoding) file_info = lookup(file_encoding) sr = StreamRecoder(file, data_info.encode, data_info.decode, file_info.streamreader, file_info.streamwriter, errors) # Add attributes to simplify introspection sr.data_encoding = data_encoding sr.file_encoding = file_encoding return sr ### Helpers for codec lookup def getencoder(encoding): """ Lookup up the codec for the given encoding and return its encoder function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).encode def getdecoder(encoding): """ Lookup up the codec for the given encoding and return its decoder function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).decode def getincrementalencoder(encoding): """ Lookup up the codec for the given encoding and return its IncrementalEncoder class or factory function. Raises a LookupError in case the encoding cannot be found or the codecs doesn't provide an incremental encoder. 
""" encoder = lookup(encoding).incrementalencoder if encoder is None: raise LookupError(encoding) return encoder def getincrementaldecoder(encoding): """ Lookup up the codec for the given encoding and return its IncrementalDecoder class or factory function. Raises a LookupError in case the encoding cannot be found or the codecs doesn't provide an incremental decoder. """ decoder = lookup(encoding).incrementaldecoder if decoder is None: raise LookupError(encoding) return decoder def getreader(encoding): """ Lookup up the codec for the given encoding and return its StreamReader class or factory function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).streamreader def getwriter(encoding): """ Lookup up the codec for the given encoding and return its StreamWriter class or factory function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).streamwriter def iterencode(iterator, encoding, errors='strict', **kwargs): """ Encoding iterator. Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. """ encoder = getincrementalencoder(encoding)(errors, **kwargs) for input in iterator: output = encoder.encode(input) if output: yield output output = encoder.encode("", True) if output: yield output def iterdecode(iterator, encoding, errors='strict', **kwargs): """ Decoding iterator. Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. """ decoder = getincrementaldecoder(encoding)(errors, **kwargs) for input in iterator: output = decoder.decode(input) if output: yield output output = decoder.decode(b"", True) if output: yield output ### Helpers for charmap-based codecs def make_identity_dict(rng): """ make_identity_dict(rng) -> dict Return a dictionary where elements of the rng sequence are mapped to themselves. 
""" return {i:i for i in rng} def make_encoding_map(decoding_map): """ Creates an encoding map from a decoding map. If a target mapping in the decoding map occurs multiple times, then that target is mapped to None (undefined mapping), causing an exception when encountered by the charmap codec during translation. One example where this happens is cp875.py which decodes multiple character to \\u001a. """ m = {} for k,v in decoding_map.items(): if not v in m: m[v] = k else: m[v] = None return m ### error handlers try: strict_errors = lookup_error("strict") ignore_errors = lookup_error("ignore") replace_errors = lookup_error("replace") xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") backslashreplace_errors = lookup_error("backslashreplace") namereplace_errors = lookup_error("namereplace") except LookupError: # In --disable-unicode builds, these error handler are missing strict_errors = None ignore_errors = None replace_errors = None xmlcharrefreplace_errors = None backslashreplace_errors = None namereplace_errors = None # Tell modulefinder that using codecs probably needs the encodings # package _false = 0 if _false: import encodings ### Tests if __name__ == '__main__': # Make stdout translate Latin-1 output into UTF-8 output sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') # Have stdin translate Latin-1 input into UTF-8 input sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') ================================================ FILE: rd/usr/lib/python3.8/codeop.py ================================================ r"""Utilities to compile possibly incomplete Python source code. 
This module provides two interfaces, broadly similar to the builtin function compile(), which take program text, a filename and a 'mode' and: - Return code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). Approach: First, check if the source consists entirely of blank lines and comments; if so, replace it with 'pass', because the built-in parser doesn't always do the right thing for these. Compile three times: as is, with \n, and with \n\n appended. If it compiles as is, it's complete. If it compiles with one \n appended, we expect more. If it doesn't compile either way, we compare the error we get when compiling with \n or \n\n appended. If the errors are the same, the code is broken. But if the errors are different, we expect more. Not intuitive; not even guaranteed to hold in future releases; but this matches the compiler's behavior from Python 1.4 through 2.2, at least. Caveat: It is possible (but not likely) that the parser stops parsing with a successful outcome before reaching the end of the source; in this case, trailing symbols may be ignored instead of causing an error. For example, a backslash followed by two newlines may be followed by arbitrary garbage. This will be fixed once the API for the parser is better. The two interfaces are: compile_command(source, filename, symbol): Compiles a single command in the manner described above. CommandCompiler(): Instances of this class have __call__ methods identical in signature to compile_command; the difference is that if the instance compiles program text containing a __future__ statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force. 
The module also provides another class: Compile(): Instances of this class act like the built-in function compile, but with 'memory' in the sense described above. """ import __future__ import warnings _features = [getattr(__future__, fname) for fname in __future__.all_feature_names] __all__ = ["compile_command", "Compile", "CommandCompiler"] PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h def _maybe_compile(compiler, source, filename, symbol): # Check for source consisting of only blank lines and comments for line in source.split("\n"): line = line.strip() if line and line[0] != '#': break # Leave it alone else: if symbol != "eval": source = "pass" # Replace it with a 'pass' statement err = err1 = err2 = None code = code1 = code2 = None try: code = compiler(source, filename, symbol) except SyntaxError as err: pass # Suppress warnings after the first compile to avoid duplication. with warnings.catch_warnings(): warnings.simplefilter("ignore") try: code1 = compiler(source + "\n", filename, symbol) except SyntaxError as e: err1 = e try: code2 = compiler(source + "\n\n", filename, symbol) except SyntaxError as e: err2 = e try: if code: return code if not code1 and repr(err1) == repr(err2): raise err1 finally: err1 = err2 = None def _compile(source, filename, symbol): return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT) def compile_command(source, filename="", symbol="single"): r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "" symbol -- optional grammar start symbol; "single" (default), "exec" or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). 
""" return _maybe_compile(_compile, source, filename, symbol) class Compile: """Instances of this class behave much like the built-in compile function, but if one is used to compile text containing a future statement, it "remembers" and compiles all subsequent program texts with the statement in force.""" def __init__(self): self.flags = PyCF_DONT_IMPLY_DEDENT def __call__(self, source, filename, symbol): codeob = compile(source, filename, symbol, self.flags, 1) for feature in _features: if codeob.co_flags & feature.compiler_flag: self.flags |= feature.compiler_flag return codeob class CommandCompiler: """Instances of this class have __call__ methods identical in signature to compile_command; the difference is that if the instance compiles program text containing a __future__ statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force.""" def __init__(self,): self.compiler = Compile() def __call__(self, source, filename="", symbol="single"): r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "" symbol -- optional grammar start symbol; "single" (default) or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). """ return _maybe_compile(self.compiler, source, filename, symbol) ================================================ FILE: rd/usr/lib/python3.8/collections/__init__.py ================================================ '''This module implements specialized container datatypes providing alternatives to Python's general purpose built-in containers, dict, list, set, and tuple. 
* namedtuple factory function for creating tuple subclasses with named fields * deque list-like container with fast appends and pops on either end * ChainMap dict-like class for creating a single view of multiple mappings * Counter dict subclass for counting hashable objects * OrderedDict dict subclass that remembers the order entries were added * defaultdict dict subclass that calls a factory function to supply missing values * UserDict wrapper around dictionary objects for easier dict subclassing * UserList wrapper around list objects for easier list subclassing * UserString wrapper around string objects for easier string subclassing ''' __all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] import _collections_abc from operator import itemgetter as _itemgetter, eq as _eq from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq from _weakref import proxy as _proxy from itertools import repeat as _repeat, chain as _chain, starmap as _starmap from reprlib import recursive_repr as _recursive_repr try: from _collections import deque except ImportError: pass else: _collections_abc.MutableSequence.register(deque) try: from _collections import defaultdict except ImportError: pass def __getattr__(name): # For backwards compatibility, continue to make the collections ABCs # through Python 3.6 available through the collections module. 
# Note, no new collections ABCs were added in Python 3.7 if name in _collections_abc.__all__: obj = getattr(_collections_abc, name) import warnings warnings.warn("Using or importing the ABCs from 'collections' instead " "of from 'collections.abc' is deprecated since Python 3.3, " "and in 3.9 it will stop working", DeprecationWarning, stacklevel=2) globals()[name] = obj return obj raise AttributeError(f'module {__name__!r} has no attribute {name!r}') ################################################################################ ### OrderedDict ################################################################################ class _OrderedDictKeysView(_collections_abc.KeysView): def __reversed__(self): yield from reversed(self._mapping) class _OrderedDictItemsView(_collections_abc.ItemsView): def __reversed__(self): for key in reversed(self._mapping): yield (key, self._mapping[key]) class _OrderedDictValuesView(_collections_abc.ValuesView): def __reversed__(self): for key in reversed(self._mapping): yield self._mapping[key] class _Link(object): __slots__ = 'prev', 'next', 'key', '__weakref__' class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # The sentinel is in self.__hardroot with a weakref proxy in self.__root. # The prev links are weakref proxies (to prevent circular references). # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. 
def __init__(self, other=(), /, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries. Keyword argument order is preserved. ''' try: self.__root except AttributeError: self.__hardroot = _Link() self.__root = root = _proxy(self.__hardroot) root.prev = root.next = root self.__map = {} self.__update(other, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev link.prev = None link.next = None def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root.next while curr is not root: yield curr.key curr = curr.next def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root.prev while curr is not root: yield curr.key curr = curr.prev def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root.prev = root.next = root self.__map.clear() dict.clear(self) def popitem(self, last=True): '''Remove and return a (key, value) pair from the dictionary. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root.prev link_prev = link.prev link_prev.next = root root.prev = link_prev else: link = root.next link_next = link.next root.next = link_next link_next.prev = root key = link.key del self.__map[key] value = dict.pop(self, key) return key, value def move_to_end(self, key, last=True): '''Move an existing element to the end (or beginning if last is false). Raise KeyError if the element does not exist. ''' link = self.__map[key] link_prev = link.prev link_next = link.next soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root if last: last = root.prev link.prev = last link.next = root root.prev = soft_link last.next = link else: first = root.next link.prev = root link.next = first first.prev = soft_link root.next = link def __sizeof__(self): sizeof = _sys.getsizeof n = len(self) + 1 # number of links including root size = sizeof(self.__dict__) # instance dictionary size += sizeof(self.__map) * 2 # internal dict and inherited dict size += sizeof(self.__hardroot) * n # link objects size += sizeof(self.__root) * n # proxy objects return size update = __update = _collections_abc.MutableMapping.update def keys(self): "D.keys() -> a set-like object providing a view on D's keys" return _OrderedDictKeysView(self) def items(self): "D.items() -> a set-like object providing a view on D's items" return _OrderedDictItemsView(self) def values(self): "D.values() -> an object providing a view on D's values" return _OrderedDictValuesView(self) __ne__ = _collections_abc.MutableMapping.__ne__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. 
''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): '''Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. ''' if key in self: return self[key] self[key] = default return default @_recursive_repr() def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __reduce__(self): 'Return state information for pickling' inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) return self.__class__, (), inst_dict or None, None, iter(self.items()) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''Create a new ordered dictionary with keys from iterable and values set to value. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return dict.__eq__(self, other) and all(map(_eq, self, other)) return dict.__eq__(self, other) try: from _collections import OrderedDict except ImportError: # Leave the pure Python version in place. pass ################################################################################ ### namedtuple ################################################################################ try: from _collections import _tuplegetter except ImportError: _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): """Returns a new subclass of tuple with named fields. 
>>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = _sys.intern(str(typename)) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = f'_{index}' seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' f'identifiers: {name!r}') if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' f'keyword: {name!r}') seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' f'{name!r}') if name in seen: raise ValueError(f'Encountered duplicate field name: {name!r}') seen.add(name) field_defaults = {} if defaults is not None: defaults = tuple(defaults) if len(defaults) > len(field_names): raise TypeError('Got more default values than field names') field_defaults = dict(reversed(list(zip(reversed(field_names), reversed(defaults))))) # Variables used in the methods and docstrings field_names = 
tuple(map(_sys.intern, field_names)) num_fields = len(field_names) arg_list = repr(field_names).replace("'", "")[1:-1] repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' tuple_new = tuple.__new__ _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip # Create all the named tuple methods to be added to the class namespace s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))' namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'} # Note: exec() has the side-effect of interning the field names exec(s, namespace) __new__ = namespace['__new__'] __new__.__doc__ = f'Create new instance of {typename}({arg_list})' if defaults is not None: __new__.__defaults__ = defaults @classmethod def _make(cls, iterable): result = tuple_new(cls, iterable) if _len(result) != num_fields: raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') return result _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' 'or iterable') def _replace(self, /, **kwds): result = self._make(_map(kwds.pop, field_names, self)) if kwds: raise ValueError(f'Got unexpected field names: {list(kwds)!r}') return result _replace.__doc__ = (f'Return a new {typename} object replacing specified ' 'fields with new values') def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + repr_fmt % self def _asdict(self): 'Return a new dict which maps field names to their values.' return _dict(_zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' 
return _tuple(self) # Modify function metadata to help with introspection and debugging for method in (__new__, _make.__func__, _replace, __repr__, _asdict, __getnewargs__): method.__qualname__ = f'{typename}.{method.__name__}' # Build-up the class namespace dictionary # and use type() to build the result class class_namespace = { '__doc__': f'{typename}({arg_list})', '__slots__': (), '_fields': field_names, '_field_defaults': field_defaults, # alternate spelling for backward compatibility '_fields_defaults': field_defaults, '__new__': __new__, '_make': _make, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, '__getnewargs__': __getnewargs__, } for index, name in enumerate(field_names): doc = _sys.intern(f'Alias for field number {index}') class_namespace[name] = _tuplegetter(index, doc) result = type(typename, (tuple,), class_namespace) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result ######################################################################## ### Counter ######################################################################## def _count_elements(mapping, iterable): 'Tally elements from the iterable.' mapping_get = mapping.get for elem in iterable: mapping[elem] = mapping_get(elem, 0) + 1 try: # Load C helper function if available from _collections import _count_elements except ImportError: pass class Counter(dict): '''Dict subclass for counting hashable items. Sometimes called a bag or multiset. 
Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts 15 >>> c['a'] # count of letter 'a' 5 >>> for elem in 'shazam': # update counts from an iterable ... c[elem] += 1 # by adding 1 to each element's count >>> c['a'] # now there are seven 'a' 7 >>> del c['b'] # remove all 'b' >>> c['b'] # now there are zero 'b' 0 >>> d = Counter('simsalabim') # make another counter >>> c.update(d) # add in the second counter >>> c['a'] # now there are nine 'a' 9 >>> c.clear() # empty the counter >>> c Counter() Note: If a count is set to zero or reduced to zero, it will remain in the counter until the entry is deleted or the counter is cleared: >>> c = Counter('aaabbc') >>> c['b'] -= 2 # reduce the count of 'b' by two >>> c.most_common() # 'b' is still in, but its count is zero [('a', 3), ('c', 1), ('b', 0)] ''' # References: # http://en.wikipedia.org/wiki/Multiset # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm # http://code.activestate.com/recipes/259174/ # Knuth, TAOCP Vol. II section 4.6.3 def __init__(self, iterable=None, /, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. Or, initialize the count from another mapping of elements to their counts. 
>>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' super(Counter, self).__init__() self.update(iterable, **kwds) def __missing__(self, key): 'The count of elements not in the Counter is zero.' # Needed so that self[missing_item] does not raise KeyError return 0 def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. >>> Counter('abracadabra').most_common(3) [('a', 5), ('b', 2), ('r', 2)] ''' # Emulate Bag.sortedByCount from Smalltalk if n is None: return sorted(self.items(), key=_itemgetter(1), reverse=True) return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) >>> product = 1 >>> for factor in prime_factors.elements(): # loop over factors ... product *= factor # and multiply them >>> product 1836 Note, if an element's count has been set to zero or is a negative number, elements() will ignore it. ''' # Emulate Bag.do from Smalltalk and Multiset.begin from C++. return _chain.from_iterable(_starmap(_repeat, self.items())) # Override dict methods where necessary @classmethod def fromkeys(cls, iterable, v=None): # There is no equivalent method for counters because the semantics # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). # Initializing counters to zero values isn't necessary because zero # is already the default value for counter lookups. Initializing # to one is easily accomplished with Counter(set(iterable)). 
For # more exotic cases, create a dictionary first using a dictionary # comprehension or dict.fromkeys(). raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(self, iterable=None, /, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if iterable is not None: if isinstance(iterable, _collections_abc.Mapping): if self: self_get = self.get for elem, count in iterable.items(): self[elem] = count + self_get(elem, 0) else: super(Counter, self).update(iterable) # fast path when counter is empty else: _count_elements(self, iterable) if kwds: self.update(kwds) def subtract(self, iterable=None, /, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. 
>>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if iterable is not None: self_get = self.get if isinstance(iterable, _collections_abc.Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds) def copy(self): 'Return a shallow copy.' return self.__class__(self) def __reduce__(self): return self.__class__, (dict(self),) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: super().__delitem__(elem) def __repr__(self): if not self: return '%s()' % self.__class__.__name__ try: items = ', '.join(map('%r: %r'.__mod__, self.most_common())) return '%s({%s})' % (self.__class__.__name__, items) except TypeError: # handle case where values are not orderable return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. # # To strip negative and zero counts, add-in an empty counter: # c += Counter() # # Rich comparison operators for multiset subset and superset tests # are deliberately omitted due to semantic conflicts with the # existing inherited dict equality method. Subset and superset # semantics ignore zero counts and require that p≤q ∧ p≥q → p=q; # however, that would not be the case for p=Counter(a=1, b=0) # and q=Counter(a=1) where the dictionaries are not equal. def __add__(self, other): '''Add counts from two counters. 
>>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count + other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count - other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count < 0: result[elem] = 0 - count return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = other_count if count < other_count else count if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. >>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = count if count < other_count else other_count if newcount > 0: result[elem] = newcount return result def __pos__(self): 'Adds an empty counter, effectively stripping negative and zero counts' result = Counter() for elem, count in self.items(): if count > 0: result[elem] = count return result def __neg__(self): '''Subtracts from an empty counter. 
Strips positive and zero counts, and flips the sign on negative counts. ''' result = Counter() for elem, count in self.items(): if count < 0: result[elem] = 0 - count return result def _keep_positive(self): '''Internal method to strip elements with a negative or zero count''' nonpositive = [elem for elem, count in self.items() if not count > 0] for elem in nonpositive: del self[elem] return self def __iadd__(self, other): '''Inplace add from another counter, keeping only positive counts. >>> c = Counter('abbb') >>> c += Counter('bcc') >>> c Counter({'b': 4, 'c': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] += count return self._keep_positive() def __isub__(self, other): '''Inplace subtract counter, but keep only results with positive counts. >>> c = Counter('abbbc') >>> c -= Counter('bccd') >>> c Counter({'b': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] -= count return self._keep_positive() def __ior__(self, other): '''Inplace union is the maximum of value from either counter. >>> c = Counter('abbb') >>> c |= Counter('bcc') >>> c Counter({'b': 3, 'c': 2, 'a': 1}) ''' for elem, other_count in other.items(): count = self[elem] if other_count > count: self[elem] = other_count return self._keep_positive() def __iand__(self, other): '''Inplace intersection is the minimum of corresponding counts. >>> c = Counter('abbb') >>> c &= Counter('bcc') >>> c Counter({'b': 1}) ''' for elem, count in self.items(): other_count = other[elem] if other_count < count: self[elem] = other_count return self._keep_positive() ######################################################################## ### ChainMap ######################################################################## class ChainMap(_collections_abc.MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can be accessed or updated using the *maps* attribute. 
There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. ''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): d = {} for mapping in reversed(self.maps): d.update(mapping) # reuses stored hash values if possible return iter(d) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self, m=None): # like Django's Context.push() '''New ChainMap with a new map followed by all previous maps. If no map is provided, an empty dict is used. ''' if m is None: m = {} return self.__class__(m, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' 
return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' self.maps[0].clear() ################################################################################ ### UserDict ################################################################################ class UserDict(_collections_abc.MutableMapping): # Start by filling-out the abstract methods def __init__(*args, **kwargs): if not args: raise TypeError("descriptor '__init__' of 'UserDict' object " "needs an argument") self, *args = args if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) if args: dict = args[0] elif 'dict' in kwargs: dict = kwargs.pop('dict') import warnings warnings.warn("Passing 'dict' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) else: dict = None self.data = {} if dict is not None: self.update(dict) if kwargs: self.update(kwargs) __init__.__text_signature__ = '($self, dict=None, /, **kwargs)' def __len__(self): return len(self.data) def __getitem__(self, key): if key in self.data: return self.data[key] if hasattr(self.__class__, "__missing__"): return self.__class__.__missing__(self, key) raise KeyError(key) def __setitem__(self, key, item): self.data[key] = item def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) # 
Modify __contains__ to work correctly when __missing__ is present def __contains__(self, key): return key in self.data # Now, add the methods in dicts but not in MutableMapping def __repr__(self): return repr(self.data) def __copy__(self): inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"].copy() return inst def copy(self): if self.__class__ is UserDict: return UserDict(self.data.copy()) import copy data = self.data try: self.data = {} c = copy.copy(self) finally: self.data = data c.update(self) return c @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d ################################################################################ ### UserList ################################################################################ class UserList(_collections_abc.MutableSequence): """A more or less complete user-defined wrapper around list objects.""" def __init__(self, initlist=None): self.data = [] if initlist is not None: # XXX should this accept an arbitrary sequence? 
if type(initlist) == type(self.data): self.data[:] = initlist elif isinstance(initlist, UserList): self.data[:] = initlist.data[:] else: self.data = list(initlist) def __repr__(self): return repr(self.data) def __lt__(self, other): return self.data < self.__cast(other) def __le__(self, other): return self.data <= self.__cast(other) def __eq__(self, other): return self.data == self.__cast(other) def __gt__(self, other): return self.data > self.__cast(other) def __ge__(self, other): return self.data >= self.__cast(other) def __cast(self, other): return other.data if isinstance(other, UserList) else other def __contains__(self, item): return item in self.data def __len__(self): return len(self.data) def __getitem__(self, i): if isinstance(i, slice): return self.__class__(self.data[i]) else: return self.data[i] def __setitem__(self, i, item): self.data[i] = item def __delitem__(self, i): del self.data[i] def __add__(self, other): if isinstance(other, UserList): return self.__class__(self.data + other.data) elif isinstance(other, type(self.data)): return self.__class__(self.data + other) return self.__class__(self.data + list(other)) def __radd__(self, other): if isinstance(other, UserList): return self.__class__(other.data + self.data) elif isinstance(other, type(self.data)): return self.__class__(other + self.data) return self.__class__(list(other) + self.data) def __iadd__(self, other): if isinstance(other, UserList): self.data += other.data elif isinstance(other, type(self.data)): self.data += other else: self.data += list(other) return self def __mul__(self, n): return self.__class__(self.data*n) __rmul__ = __mul__ def __imul__(self, n): self.data *= n return self def __copy__(self): inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"][:] return inst def append(self, item): self.data.append(item) def insert(self, i, item): self.data.insert(i, 
item) def pop(self, i=-1): return self.data.pop(i) def remove(self, item): self.data.remove(item) def clear(self): self.data.clear() def copy(self): return self.__class__(self) def count(self, item): return self.data.count(item) def index(self, item, *args): return self.data.index(item, *args) def reverse(self): self.data.reverse() def sort(self, /, *args, **kwds): self.data.sort(*args, **kwds) def extend(self, other): if isinstance(other, UserList): self.data.extend(other.data) else: self.data.extend(other) ################################################################################ ### UserString ################################################################################ class UserString(_collections_abc.Sequence): def __init__(self, seq): if isinstance(seq, str): self.data = seq elif isinstance(seq, UserString): self.data = seq.data[:] else: self.data = str(seq) def __str__(self): return str(self.data) def __repr__(self): return repr(self.data) def __int__(self): return int(self.data) def __float__(self): return float(self.data) def __complex__(self): return complex(self.data) def __hash__(self): return hash(self.data) def __getnewargs__(self): return (self.data[:],) def __eq__(self, string): if isinstance(string, UserString): return self.data == string.data return self.data == string def __lt__(self, string): if isinstance(string, UserString): return self.data < string.data return self.data < string def __le__(self, string): if isinstance(string, UserString): return self.data <= string.data return self.data <= string def __gt__(self, string): if isinstance(string, UserString): return self.data > string.data return self.data > string def __ge__(self, string): if isinstance(string, UserString): return self.data >= string.data return self.data >= string def __contains__(self, char): if isinstance(char, UserString): char = char.data return char in self.data def __len__(self): return len(self.data) def __getitem__(self, index): return 
self.__class__(self.data[index]) def __add__(self, other): if isinstance(other, UserString): return self.__class__(self.data + other.data) elif isinstance(other, str): return self.__class__(self.data + other) return self.__class__(self.data + str(other)) def __radd__(self, other): if isinstance(other, str): return self.__class__(other + self.data) return self.__class__(str(other) + self.data) def __mul__(self, n): return self.__class__(self.data*n) __rmul__ = __mul__ def __mod__(self, args): return self.__class__(self.data % args) def __rmod__(self, template): return self.__class__(str(template) % self) # the following methods are defined in alphabetical order: def capitalize(self): return self.__class__(self.data.capitalize()) def casefold(self): return self.__class__(self.data.casefold()) def center(self, width, *args): return self.__class__(self.data.center(width, *args)) def count(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.count(sub, start, end) def encode(self, encoding='utf-8', errors='strict'): encoding = 'utf-8' if encoding is None else encoding errors = 'strict' if errors is None else errors return self.data.encode(encoding, errors) def endswith(self, suffix, start=0, end=_sys.maxsize): return self.data.endswith(suffix, start, end) def expandtabs(self, tabsize=8): return self.__class__(self.data.expandtabs(tabsize)) def find(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.find(sub, start, end) def format(self, /, *args, **kwds): return self.data.format(*args, **kwds) def format_map(self, mapping): return self.data.format_map(mapping) def index(self, sub, start=0, end=_sys.maxsize): return self.data.index(sub, start, end) def isalpha(self): return self.data.isalpha() def isalnum(self): return self.data.isalnum() def isascii(self): return self.data.isascii() def isdecimal(self): return self.data.isdecimal() def isdigit(self): return 
self.data.isdigit() def isidentifier(self): return self.data.isidentifier() def islower(self): return self.data.islower() def isnumeric(self): return self.data.isnumeric() def isprintable(self): return self.data.isprintable() def isspace(self): return self.data.isspace() def istitle(self): return self.data.istitle() def isupper(self): return self.data.isupper() def join(self, seq): return self.data.join(seq) def ljust(self, width, *args): return self.__class__(self.data.ljust(width, *args)) def lower(self): return self.__class__(self.data.lower()) def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) maketrans = str.maketrans def partition(self, sep): return self.data.partition(sep) def replace(self, old, new, maxsplit=-1): if isinstance(old, UserString): old = old.data if isinstance(new, UserString): new = new.data return self.__class__(self.data.replace(old, new, maxsplit)) def rfind(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.rfind(sub, start, end) def rindex(self, sub, start=0, end=_sys.maxsize): return self.data.rindex(sub, start, end) def rjust(self, width, *args): return self.__class__(self.data.rjust(width, *args)) def rpartition(self, sep): return self.data.rpartition(sep) def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars)) def split(self, sep=None, maxsplit=-1): return self.data.split(sep, maxsplit) def rsplit(self, sep=None, maxsplit=-1): return self.data.rsplit(sep, maxsplit) def splitlines(self, keepends=False): return self.data.splitlines(keepends) def startswith(self, prefix, start=0, end=_sys.maxsize): return self.data.startswith(prefix, start, end) def strip(self, chars=None): return self.__class__(self.data.strip(chars)) def swapcase(self): return self.__class__(self.data.swapcase()) def title(self): return self.__class__(self.data.title()) def translate(self, *args): return self.__class__(self.data.translate(*args)) def upper(self): return 
self.__class__(self.data.upper()) def zfill(self, width): return self.__class__(self.data.zfill(width)) ================================================ FILE: rd/usr/lib/python3.8/collections/abc.py ================================================ from _collections_abc import * from _collections_abc import __all__ ================================================ FILE: rd/usr/lib/python3.8/colorsys.py ================================================ """Conversion functions between RGB and other color systems. This modules provides two functions for each color system ABC: rgb_to_abc(r, g, b) --> a, b, c abc_to_rgb(a, b, c) --> r, g, b All inputs and outputs are triples of floats in the range [0.0...1.0] (with the exception of I and Q, which covers a slightly larger range). Inputs outside the valid range may cause exceptions or invalid outputs. Supported color systems: RGB: Red, Green, Blue components YIQ: Luminance, Chrominance (used by composite video signals) HLS: Hue, Luminance, Saturation HSV: Hue, Saturation, Value """ # References: # http://en.wikipedia.org/wiki/YIQ # http://en.wikipedia.org/wiki/HLS_color_space # http://en.wikipedia.org/wiki/HSV_color_space __all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", "rgb_to_hsv","hsv_to_rgb"] # Some floating point constants ONE_THIRD = 1.0/3.0 ONE_SIXTH = 1.0/6.0 TWO_THIRD = 2.0/3.0 # YIQ: used by composite video signals (linear combinations of RGB) # Y: perceived grey level (0.0 == black, 1.0 == white) # I, Q: color components # # There are a great many versions of the constants used in these formulae. # The ones in this library uses constants from the FCC version of NTSC. 
def rgb_to_yiq(r, g, b): y = 0.30*r + 0.59*g + 0.11*b i = 0.74*(r-y) - 0.27*(b-y) q = 0.48*(r-y) + 0.41*(b-y) return (y, i, q) def yiq_to_rgb(y, i, q): # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48) # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48) # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59 r = y + 0.9468822170900693*i + 0.6235565819861433*q g = y - 0.27478764629897834*i - 0.6356910791873801*q b = y - 1.1085450346420322*i + 1.7090069284064666*q if r < 0.0: r = 0.0 if g < 0.0: g = 0.0 if b < 0.0: b = 0.0 if r > 1.0: r = 1.0 if g > 1.0: g = 1.0 if b > 1.0: b = 1.0 return (r, g, b) # HLS: Hue, Luminance, Saturation # H: position in the spectrum # L: color lightness # S: color saturation def rgb_to_hls(r, g, b): maxc = max(r, g, b) minc = min(r, g, b) # XXX Can optimize (maxc+minc) and (maxc-minc) l = (minc+maxc)/2.0 if minc == maxc: return 0.0, l, 0.0 if l <= 0.5: s = (maxc-minc) / (maxc+minc) else: s = (maxc-minc) / (2.0-maxc-minc) rc = (maxc-r) / (maxc-minc) gc = (maxc-g) / (maxc-minc) bc = (maxc-b) / (maxc-minc) if r == maxc: h = bc-gc elif g == maxc: h = 2.0+rc-bc else: h = 4.0+gc-rc h = (h/6.0) % 1.0 return h, l, s def hls_to_rgb(h, l, s): if s == 0.0: return l, l, l if l <= 0.5: m2 = l * (1.0+s) else: m2 = l+s-(l*s) m1 = 2.0*l - m2 return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) def _v(m1, m2, hue): hue = hue % 1.0 if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0 if hue < 0.5: return m2 if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 return m1 # HSV: Hue, Saturation, Value # H: position in the spectrum # S: color saturation ("purity") # V: color brightness def rgb_to_hsv(r, g, b): maxc = max(r, g, b) minc = min(r, g, b) v = maxc if minc == maxc: return 0.0, 0.0, v s = (maxc-minc) / maxc rc = (maxc-r) / (maxc-minc) gc = (maxc-g) / (maxc-minc) bc = (maxc-b) / (maxc-minc) if r == maxc: h = bc-gc elif g == maxc: h = 2.0+rc-bc else: h = 4.0+gc-rc h = (h/6.0) % 1.0 return h, s, v def hsv_to_rgb(h, s, v): if s == 0.0: 
return v, v, v i = int(h*6.0) # XXX assume int() truncates! f = (h*6.0) - i p = v*(1.0 - s) q = v*(1.0 - s*f) t = v*(1.0 - s*(1.0-f)) i = i%6 if i == 0: return v, t, p if i == 1: return q, v, p if i == 2: return p, v, t if i == 3: return p, q, v if i == 4: return t, p, v if i == 5: return v, p, q # Cannot get here ================================================ FILE: rd/usr/lib/python3.8/compileall.py ================================================ """Module/script to byte-compile all .py files to .pyc files. When called as a script with arguments, this compiles the directories given as arguments recursively; the -l option prevents it from recursing into directories. Without arguments, if compiles all modules on sys.path, without recursing into subdirectories. (Even though it should do so for packages -- for now, you'll have to deal with packages separately.) See module py_compile for details of the actual byte-compilation. """ import os import sys import importlib.util import py_compile import struct from functools import partial __all__ = ["compile_dir","compile_file","compile_path"] def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0): if quiet < 2 and isinstance(dir, os.PathLike): dir = os.fspath(dir) if not quiet: print('Listing {!r}...'.format(dir)) try: names = os.listdir(dir) except OSError: if quiet < 2: print("Can't list {!r}".format(dir)) names = [] names.sort() for name in names: if name == '__pycache__': continue fullname = os.path.join(dir, name) if ddir is not None: dfile = os.path.join(ddir, name) else: dfile = None if not os.path.isdir(fullname): yield fullname, ddir elif (maxlevels > 0 and name != os.curdir and name != os.pardir and os.path.isdir(fullname) and not os.path.islink(fullname)): yield from _walk_dir(fullname, ddir=dfile, maxlevels=maxlevels - 1, quiet=quiet) def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None, quiet=0, legacy=False, optimize=-1, workers=1, invalidation_mode=None): """Byte-compile all modules in the 
given directory tree.

    Arguments (only dir is required):

    dir:       the directory to byte-compile
    maxlevels: maximum recursion level (default 10)
    ddir:      the directory that will be prepended to the path to the
               file as it is compiled into each byte-code file.
    force:     if True, force compilation, even if timestamps are up-to-date
    quiet:     full output with False or 0, errors only with 1,
               no output with 2
    legacy:    if True, produce legacy pyc paths instead of PEP 3147 paths
    optimize:  optimization level or -1 for level of the interpreter
    workers:   maximum number of parallel workers
    invalidation_mode: how the up-to-dateness of the pyc will be checked
    """
    ProcessPoolExecutor = None
    if workers < 0:
        raise ValueError('workers must be greater or equal to 0')
    if workers != 1:
        try:
            # Only import when needed, as low resource platforms may
            # fail to import it
            from concurrent.futures import ProcessPoolExecutor
        except ImportError:
            workers = 1
    files_and_ddirs = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels,
                                ddir=ddir)
    success = True
    if workers != 1 and ProcessPoolExecutor is not None:
        # If workers == 0, let ProcessPoolExecutor choose
        workers = workers or None
        with ProcessPoolExecutor(max_workers=workers) as executor:
            results = executor.map(
                    partial(_compile_file_tuple,
                            force=force, rx=rx, quiet=quiet,
                            legacy=legacy, optimize=optimize,
                            invalidation_mode=invalidation_mode,
                        ),
                    files_and_ddirs)
            # A single False result makes the whole run unsuccessful.
            success = min(results, default=True)
    else:
        # Serial fallback (workers == 1 or no ProcessPoolExecutor).
        for file, dfile in files_and_ddirs:
            if not compile_file(file, dfile, force, rx, quiet,
                                legacy, optimize, invalidation_mode):
                success = False
    return success

def _compile_file_tuple(file_and_dfile, **kwargs):
    """Needs to be toplevel for ProcessPoolExecutor."""
    file, dfile = file_and_dfile
    return compile_file(file, dfile, **kwargs)

def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
                 legacy=False, optimize=-1,
                 invalidation_mode=None):
    """Byte-compile one file.

    Arguments (only fullname is required):

    fullname:  the file to byte-compile
    ddir:      if given, the directory name compiled in to the
               byte-code file.
    force:     if True, force compilation, even if timestamps are up-to-date
    quiet:     full output with False or 0, errors only with 1,
               no output with 2
    legacy:    if True, produce legacy pyc paths instead of PEP 3147 paths
    optimize:  optimization level or -1 for level of the interpreter
    invalidation_mode: how the up-to-dateness of the pyc will be checked
    """
    success = True
    if quiet < 2 and isinstance(fullname, os.PathLike):
        fullname = os.fspath(fullname)
    name = os.path.basename(fullname)
    if ddir is not None:
        dfile = os.path.join(ddir, name)
    else:
        dfile = None
    if rx is not None:
        # Skip files matching the exclusion regexp.
        mo = rx.search(fullname)
        if mo:
            return success
    if os.path.isfile(fullname):
        if legacy:
            cfile = fullname + 'c'
        else:
            if optimize >= 0:
                opt = optimize if optimize >= 1 else ''
                cfile = importlib.util.cache_from_source(
                                fullname, optimization=opt)
            else:
                cfile = importlib.util.cache_from_source(fullname)
            cache_dir = os.path.dirname(cfile)
        head, tail = name[:-3], name[-3:]
        if tail == '.py':
            if not force:
                # Compare the timestamp header of the existing pyc with
                # the source mtime; skip recompilation when they match.
                try:
                    mtime = int(os.stat(fullname).st_mtime)
                    expect = struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
                                         0, mtime)
                    with open(cfile, 'rb') as chandle:
                        actual = chandle.read(12)
                    if expect == actual:
                        return success
                except OSError:
                    pass
            if not quiet:
                print('Compiling {!r}...'.format(fullname))
            try:
                ok = py_compile.compile(fullname, cfile, dfile, True,
                                        optimize=optimize,
                                        invalidation_mode=invalidation_mode)
            except py_compile.PyCompileError as err:
                success = False
                if quiet >= 2:
                    return success
                elif quiet:
                    print('*** Error compiling {!r}...'.format(fullname))
                else:
                    print('*** ', end='')
                # escape non-printable characters in msg
                msg = err.msg.encode(sys.stdout.encoding,
                                     errors='backslashreplace')
                msg = msg.decode(sys.stdout.encoding)
                print(msg)
            except (SyntaxError, UnicodeError, OSError) as e:
                success = False
                if quiet >= 2:
                    return success
                elif quiet:
                    print('*** Error compiling {!r}...'.format(fullname))
                else:
                    print('*** ', end='')
                print(e.__class__.__name__ + ':', e)
            else:
                if ok == 0:
                    success = False
    return success

def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0,
                 legacy=False, optimize=-1,
                 invalidation_mode=None):
    """Byte-compile all module on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default True)
    maxlevels:   max recursion level (default 0)
    force: as for compile_dir() (default False)
    quiet: as for compile_dir() (default 0)
    legacy: as for compile_dir() (default False)
    optimize: as for compile_dir() (default -1)
    invalidation_mode: as for compiler_dir()
    """
    success = True
    for dir in sys.path:
        if (not dir or dir == os.curdir) and skip_curdir:
            if quiet < 2:
                print('Skipping current directory')
        else:
            success = success and compile_dir(
                dir,
                maxlevels,
                None,
                force,
                quiet=quiet,
                legacy=legacy,
                optimize=optimize,
                invalidation_mode=invalidation_mode,
            )
    return success


def main():
    """Script main program."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Utilities to support installing Python libraries.')
    parser.add_argument('-l', action='store_const', const=0,
                        default=10, dest='maxlevels',
                        help="don't recurse into subdirectories")
    parser.add_argument('-r', type=int, dest='recursion',
                        help=('control the maximum recursion level. '
                              'if `-l` and `-r` options are specified, '
                              'then `-r` takes precedence.'))
    parser.add_argument('-f', action='store_true', dest='force',
                        help='force rebuild even if timestamps are up to date')
    parser.add_argument('-q', action='count', dest='quiet', default=0,
                        help='output only error messages; -qq will suppress '
                             'the error messages as well.')
    parser.add_argument('-b', action='store_true', dest='legacy',
                        help='use legacy (pre-PEP3147) compiled file locations')
    parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
                        help=('directory to prepend to file paths for use in '
                              'compile-time tracebacks and in runtime '
                              'tracebacks in cases where the source file is '
                              'unavailable'))
    parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
                        help=('skip files matching the regular expression; '
                              'the regexp is searched for in the full path '
                              'of each file considered for compilation'))
    parser.add_argument('-i', metavar='FILE', dest='flist',
                        help=('add all the files and directories listed in '
                              'FILE to the list considered for compilation; '
                              'if "-", names are read from stdin'))
    parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
                        help=('zero or more file and directory names '
                              'to compile; if no arguments given, defaults '
                              'to the equivalent of -l sys.path'))
    parser.add_argument('-j', '--workers', default=1,
                        type=int, help='Run compileall concurrently')
    invalidation_modes = [mode.name.lower().replace('_', '-')
                          for mode in py_compile.PycInvalidationMode]
    parser.add_argument('--invalidation-mode',
                        choices=sorted(invalidation_modes),
                        help=('set .pyc invalidation mode; defaults to '
                              '"checked-hash" if the SOURCE_DATE_EPOCH '
                              'environment variable is set, and '
                              '"timestamp" otherwise.'))

    args = parser.parse_args()
    compile_dests = args.compile_dest

    if args.rx:
        import re
        args.rx = re.compile(args.rx)

    # -r overrides -l when both are given.
    if args.recursion is not None:
        maxlevels = args.recursion
    else:
        maxlevels = args.maxlevels

    # if flist is provided then load it
    if args.flist:
        try:
            with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
                for line in f:
                    compile_dests.append(line.strip())
        except OSError:
            if args.quiet < 2:
                print("Error reading file list {}".format(args.flist))
            return False

    if args.invalidation_mode:
        ivl_mode = args.invalidation_mode.replace('-', '_').upper()
        invalidation_mode = py_compile.PycInvalidationMode[ivl_mode]
    else:
        invalidation_mode = None

    success = True
    try:
        if compile_dests:
            for dest in compile_dests:
                if os.path.isfile(dest):
                    if not compile_file(dest, args.ddir, args.force, args.rx,
                                        args.quiet, args.legacy,
                                        invalidation_mode=invalidation_mode):
                        success = False
                else:
                    if not compile_dir(dest, maxlevels, args.ddir,
                                       args.force, args.rx, args.quiet,
                                       args.legacy, workers=args.workers,
                                       invalidation_mode=invalidation_mode):
                        success = False
        return success
    else:
        return compile_path(legacy=args.legacy, force=args.force,
                            quiet=args.quiet,
                            invalidation_mode=invalidation_mode)
    except KeyboardInterrupt:
        if args.quiet < 2:
            print("\n[interrupted]")
        return False
    return True


if __name__ == '__main__':
    exit_status = int(not main())
    sys.exit(exit_status)



================================================
FILE: rd/usr/lib/python3.8/configparser.py
================================================
"""Configuration file parser.

A configuration file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.

Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.

class:

ConfigParser -- responsible for parsing a list of
                    configuration files, and managing the parsed database.

    methods:

    __init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
             delimiters=('=', ':'), comment_prefixes=('#', ';'),
             inline_comment_prefixes=None, strict=True,
             empty_lines_in_values=True, default_section='DEFAULT',
             interpolation=<unset>, converters=<unset>):
        Create the parser.
When `defaults' is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. When `dict_type' is given, it will be used to create the dictionary objects for the list of sections, for the options within a section, and for the default values. When `delimiters' is given, it will be used as the set of substrings that divide keys from values. When `comment_prefixes' is given, it will be used as the set of substrings that prefix comments in empty lines. Comments can be indented. When `inline_comment_prefixes' is given, it will be used as the set of substrings that prefix comments in non-empty lines. When `strict` is True, the parser won't allow for any section or option duplicates while reading from a single source (file, string or dictionary). Default is True. When `empty_lines_in_values' is False (default: True), each empty line marks the end of an option. Otherwise, internal empty lines of a multiline option are kept as part of the value. When `allow_no_value' is True (default: False), options without values are accepted; the value presented for these is None. When `default_section' is given, the name of the special section is named accordingly. By default it is called ``"DEFAULT"`` but this can be customized to point to any other valid section name. Its current value can be retrieved using the ``parser_instance.default_section`` attribute and may be modified at runtime. When `interpolation` is given, it should be an Interpolation subclass instance. It will be used as the handler for option value pre-processing when using getters. RawConfigParser objects don't do any sort of interpolation, whereas ConfigParser uses an instance of BasicInterpolation. The library also provides a ``zc.buildbot`` inspired ExtendedInterpolation implementation. 
When `converters` is given, it should be a dictionary where each key represents the name of a type converter and each value is a callable implementing the conversion from string to the desired datatype. Every converter gets its corresponding get*() method on the parser object and section proxies. sections() Return all the configuration section names, sans DEFAULT. has_section(section) Return whether the given section exists. has_option(section, option) Return whether the given option exists in the given section. options(section) Return list of configuration options for the named section. read(filenames, encoding=None) Read and parse the iterable of named configuration files, given by name. A single filename is also allowed. Non-existing files are ignored. Return list of successfully read files. read_file(f, filename=None) Read and parse one configuration file, given as a file object. The filename defaults to f.name; it is only used in error messages (if f has no `name' attribute, the string `' is used). read_string(string) Read configuration from a given string. read_dict(dictionary) Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. Values are automatically converted to strings. get(section, option, raw=False, vars=None, fallback=_UNSET) Return a string value for the named option. All % interpolations are expanded in the return values, based on the defaults passed into the constructor and the DEFAULT section. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents override any pre-existing defaults. If `option' is a key in `vars', the value from `vars' is used. getint(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to an integer. 
getfloat(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to a float. getboolean(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to a boolean (currently case insensitively defined as 0, false, no, off for False, and 1, true, yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) If section is given, return a list of tuples with (name, value) for each option in the section. Otherwise, return a list of tuples with (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. remove_option(section, option) Remove the given option from the given section. set(section, option, value) Set the given option. write(fp, space_around_delimiters=True) Write the configuration state in .ini format. If `space_around_delimiters' is True (the default), delimiters between keys and values are surrounded by spaces. """ from collections.abc import MutableMapping from collections import ChainMap as _ChainMap import functools import io import itertools import os import re import sys import warnings __all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", "NoOptionError", "InterpolationError", "InterpolationDepthError", "InterpolationMissingOptionError", "InterpolationSyntaxError", "ParsingError", "MissingSectionHeaderError", "ConfigParser", "SafeConfigParser", "RawConfigParser", "Interpolation", "BasicInterpolation", "ExtendedInterpolation", "LegacyInterpolation", "SectionProxy", "ConverterMapping", "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] _default_dict = dict DEFAULTSECT = "DEFAULT" MAX_INTERPOLATION_DEPTH = 10 # exception classes class Error(Exception): """Base class for ConfigParser exceptions.""" def __init__(self, msg=''): self.message = msg Exception.__init__(self, msg) def __repr__(self): return self.message __str__ = __repr__ class NoSectionError(Error): """Raised when 
no section matches a requested option.""" def __init__(self, section): Error.__init__(self, 'No section: %r' % (section,)) self.section = section self.args = (section, ) class DuplicateSectionError(Error): """Raised when a section is repeated in an input source. Possible repetitions that raise this exception are: multiple creation using the API or in strict parsers when a section is found more than once in a single input file, string or dictionary. """ def __init__(self, section, source=None, lineno=None): msg = [repr(section), " already exists"] if source is not None: message = ["While reading from ", repr(source)] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": section ") message.extend(msg) msg = message else: msg.insert(0, "Section ") Error.__init__(self, "".join(msg)) self.section = section self.source = source self.lineno = lineno self.args = (section, source, lineno) class DuplicateOptionError(Error): """Raised by strict parsers when an option is repeated in an input source. Current implementation raises this exception only when an option is found more than once in a single file, string or dictionary. 
""" def __init__(self, section, option, source=None, lineno=None): msg = [repr(option), " in section ", repr(section), " already exists"] if source is not None: message = ["While reading from ", repr(source)] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": option ") message.extend(msg) msg = message else: msg.insert(0, "Option ") Error.__init__(self, "".join(msg)) self.section = section self.option = option self.source = source self.lineno = lineno self.args = (section, option, source, lineno) class NoOptionError(Error): """A requested option was not found.""" def __init__(self, option, section): Error.__init__(self, "No option %r in section: %r" % (option, section)) self.option = option self.section = section self.args = (option, section) class InterpolationError(Error): """Base class for interpolation-related exceptions.""" def __init__(self, option, section, msg): Error.__init__(self, msg) self.option = option self.section = section self.args = (option, section, msg) class InterpolationMissingOptionError(InterpolationError): """A string substitution required a setting which was not available.""" def __init__(self, option, section, rawval, reference): msg = ("Bad value substitution: option {!r} in section {!r} contains " "an interpolation key {!r} which is not a valid option name. " "Raw value: {!r}".format(option, section, reference, rawval)) InterpolationError.__init__(self, option, section, msg) self.reference = reference self.args = (option, section, rawval, reference) class InterpolationSyntaxError(InterpolationError): """Raised when the source text contains invalid syntax. Current implementation raises this exception when the source text into which substitutions are made does not conform to the required syntax. 
""" class InterpolationDepthError(InterpolationError): """Raised when substitutions are nested too deeply.""" def __init__(self, option, section, rawval): msg = ("Recursion limit exceeded in value substitution: option {!r} " "in section {!r} contains an interpolation key which " "cannot be substituted in {} steps. Raw value: {!r}" "".format(option, section, MAX_INTERPOLATION_DEPTH, rawval)) InterpolationError.__init__(self, option, section, msg) self.args = (option, section, rawval) class ParsingError(Error): """Raised when a configuration file does not follow legal syntax.""" def __init__(self, source=None, filename=None): # Exactly one of `source'/`filename' arguments has to be given. # `filename' kept for compatibility. if filename and source: raise ValueError("Cannot specify both `filename' and `source'. " "Use `source'.") elif not filename and not source: raise ValueError("Required argument `source' not given.") elif filename: source = filename Error.__init__(self, 'Source contains parsing errors: %r' % source) self.source = source self.errors = [] self.args = (source, ) @property def filename(self): """Deprecated, use `source'.""" warnings.warn( "The 'filename' attribute will be removed in future versions. " "Use 'source' instead.", DeprecationWarning, stacklevel=2 ) return self.source @filename.setter def filename(self, value): """Deprecated, user `source'.""" warnings.warn( "The 'filename' attribute will be removed in future versions. 
" "Use 'source' instead.", DeprecationWarning, stacklevel=2 ) self.source = value def append(self, lineno, line): self.errors.append((lineno, line)) self.message += '\n\t[line %2d]: %s' % (lineno, line) class MissingSectionHeaderError(ParsingError): """Raised when a key-value pair is found before any section header.""" def __init__(self, filename, lineno, line): Error.__init__( self, 'File contains no section headers.\nfile: %r, line: %d\n%r' % (filename, lineno, line)) self.source = filename self.lineno = lineno self.line = line self.args = (filename, lineno, line) # Used in parser getters to indicate the default behaviour when a specific # option is not found it to raise an exception. Created to enable `None' as # a valid fallback value. _UNSET = object() class Interpolation: """Dummy interpolation that passes the value through with no changes.""" def before_get(self, parser, section, option, value, defaults): return value def before_set(self, parser, section, option, value): return value def before_read(self, parser, section, option, value): return value def before_write(self, parser, section, option, value): return value class BasicInterpolation(Interpolation): """Interpolation as implemented in the classic ConfigParser. The option values can contain format strings which refer to other values in the same section, or values in the special default section. For example: something: %(dir)s/whatever would resolve the "%(dir)s" to the value of dir. All reference expansions are done late, on demand. If a user needs to use a bare % in a configuration file, she can escape it by writing %%. 
Other % usage is considered a user error and raises `InterpolationSyntaxError'.""" _KEYCRE = re.compile(r"%\(([^)]+)\)s") def before_get(self, parser, section, option, value, defaults): L = [] self._interpolate_some(parser, option, L, value, section, defaults, 1) return ''.join(L) def before_set(self, parser, section, option, value): tmp_value = value.replace('%%', '') # escaped percent signs tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '%' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, tmp_value.find('%'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, depth): rawval = parser.get(section, option, raw=True, fallback=rest) if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rawval) while rest: p = rest.find("%") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "%": accum.append("%") rest = rest[2:] elif c == "(": m = self._KEYCRE.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) var = parser.optionxform(m.group(1)) rest = rest[m.end():] try: v = map[var] except KeyError: raise InterpolationMissingOptionError( option, section, rawval, var) from None if "%" in v: self._interpolate_some(parser, option, accum, v, section, map, depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'%%' must be followed by '%%' or '(', " "found: %r" % (rest,)) class ExtendedInterpolation(Interpolation): """Advanced variant of interpolation, supports the syntax used by `zc.buildout'. 
Enables interpolation between sections.""" _KEYCRE = re.compile(r"\$\{([^}]+)\}") def before_get(self, parser, section, option, value, defaults): L = [] self._interpolate_some(parser, option, L, value, section, defaults, 1) return ''.join(L) def before_set(self, parser, section, option, value): tmp_value = value.replace('$$', '') # escaped dollar signs tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '$' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, tmp_value.find('$'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, depth): rawval = parser.get(section, option, raw=True, fallback=rest) if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rawval) while rest: p = rest.find("$") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "$": accum.append("$") rest = rest[2:] elif c == "{": m = self._KEYCRE.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) path = m.group(1).split(':') rest = rest[m.end():] sect = section opt = option try: if len(path) == 1: opt = parser.optionxform(path[0]) v = map[opt] elif len(path) == 2: sect = path[0] opt = parser.optionxform(path[1]) v = parser.get(sect, opt, raw=True) else: raise InterpolationSyntaxError( option, section, "More than one ':' found: %r" % (rest,)) except (KeyError, NoSectionError, NoOptionError): raise InterpolationMissingOptionError( option, section, rawval, ":".join(path)) from None if "$" in v: self._interpolate_some(parser, opt, accum, v, sect, dict(parser.items(sect, raw=True)), depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'$' must be followed by '$' or '{', " "found: %r" % (rest,)) class LegacyInterpolation(Interpolation): """Deprecated interpolation used in old versions of ConfigParser. 
Use BasicInterpolation or ExtendedInterpolation instead.""" _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") def before_get(self, parser, section, option, value, vars): rawval = value depth = MAX_INTERPOLATION_DEPTH while depth: # Loop through this until it's done depth -= 1 if value and "%(" in value: replace = functools.partial(self._interpolation_replace, parser=parser) value = self._KEYCRE.sub(replace, value) try: value = value % vars except KeyError as e: raise InterpolationMissingOptionError( option, section, rawval, e.args[0]) from None else: break if value and "%(" in value: raise InterpolationDepthError(option, section, rawval) return value def before_set(self, parser, section, option, value): return value @staticmethod def _interpolation_replace(match, parser): s = match.group(1) if s is None: return match.group() else: return "%%(%s)s" % parser.optionxform(s) class RawConfigParser(MutableMapping): """ConfigParser that does not do interpolation.""" # Regular expressions for parsing section headers and options _SECT_TMPL = r""" \[ # [ (?P
[^]]+) # very permissive! \] # ] """ _OPT_TMPL = r""" (?P