Showing preview only (714K chars total). Download the full file or copy to clipboard to get everything.
Repository: jdbirdwell/afl
Branch: master
Commit: 0331c6736316
Files: 143
Total size: 676.2 KB
Directory structure:
gitextract_kd351qrk/
├── Makefile
├── afl-as.c
├── afl-as.h
├── afl-cmin
├── afl-fuzz.c
├── afl-gcc.c
├── afl-gotcpu.c
├── afl-plot
├── afl-showmap.c
├── afl-tmin.c
├── afl-whatsup
├── alloc-inl.h
├── config.h
├── debug.h
├── docs/
│ ├── COPYING
│ ├── ChangeLog
│ ├── INSTALL
│ ├── QuickStartGuide.txt
│ ├── README
│ ├── env_variables.txt
│ ├── historical_notes.txt
│ ├── notes_for_asan.txt
│ ├── parallel_fuzzing.txt
│ ├── perf_tips.txt
│ ├── sister_projects.txt
│ ├── status_screen.txt
│ ├── technical_details.txt
│ └── vuln_samples/
│ ├── bash-cmd-exec.var
│ ├── bash-uninit-mem.var
│ ├── file-fpu-exception.elf
│ ├── jxrlib-crash.jxr
│ ├── jxrlib-crash2.jxr
│ ├── jxrlib-crash3.jxr
│ ├── jxrlib-crash4.jxr
│ ├── lesspipe-cpio-bad-write.cpio
│ ├── libtiff-bad-write.tif
│ ├── libtiff-uninit-mem.tif
│ ├── libtiff-uninit-mem2.tif
│ ├── libtiff-uninit-mem3.tif
│ ├── libtiff-uninit-mem4.tif
│ ├── libxml2-bad-read.xml
│ ├── msie-jxr-mem-leak.jxr
│ ├── msie-tiff-mem-leak.tif
│ ├── openssl-null-ptr.der
│ ├── openssl-null-ptr2.der
│ ├── sqlite-bad-free.sql
│ ├── sqlite-bad-ptr.sql
│ ├── sqlite-bad-ptr2.sql
│ ├── sqlite-bad-ptr3.sql
│ ├── sqlite-heap-overflow.sql
│ ├── sqlite-heap-overwrite.sql
│ ├── sqlite-negative-memset.sql
│ ├── sqlite-null-ptr1.sql
│ ├── sqlite-null-ptr10.sql
│ ├── sqlite-null-ptr11.sql
│ ├── sqlite-null-ptr12.sql
│ ├── sqlite-null-ptr13.sql
│ ├── sqlite-null-ptr14.sql
│ ├── sqlite-null-ptr15.sql
│ ├── sqlite-null-ptr2.sql
│ ├── sqlite-null-ptr3.sql
│ ├── sqlite-null-ptr4.sql
│ ├── sqlite-null-ptr5.sql
│ ├── sqlite-null-ptr6.sql
│ ├── sqlite-null-ptr7.sql
│ ├── sqlite-null-ptr8.sql
│ ├── sqlite-null-ptr9.sql
│ ├── sqlite-oob-read.sql
│ ├── sqlite-oob-write.sql
│ ├── sqlite-stack-buf-overflow.sql
│ ├── sqlite-stack-exhaustion.sql
│ ├── sqlite-unint-mem.sql
│ ├── sqlite-use-after-free.sql
│ ├── strings-bfd-badptr.elf
│ ├── strings-bfd-badptr2.elf
│ ├── strings-stack-overflow
│ ├── strings-unchecked-ctr.elf
│ ├── tcpdump-arp-crash.pcap
│ ├── tcpdump-ppp-crash.pcap
│ └── unrtf-arbitrary-read.rtf
├── experimental/
│ ├── README.experiments
│ ├── argv_fuzzing/
│ │ └── argv-fuzz-inl.h
│ ├── asan_cgroups/
│ │ └── limit_memory.sh
│ ├── bash_shellshock/
│ │ └── shellshock-fuzz.diff
│ ├── canvas_harness/
│ │ └── canvas_harness.html
│ ├── clang_asm_normalize/
│ │ └── as
│ ├── crash_triage/
│ │ └── triage_crashes.sh
│ ├── distributed_fuzzing/
│ │ └── sync_script.sh
│ ├── instrumented_cmp/
│ │ └── instrumented_cmp.c
│ ├── libpng_no_checksum/
│ │ └── libpng-nocrc.patch
│ ├── persistent_demo/
│ │ └── persistent_demo.c
│ └── post_library/
│ ├── post_library.so.c
│ └── post_library_png.so.c
├── hash.h
├── llvm_mode/
│ ├── Makefile
│ ├── README.llvm
│ ├── afl-clang-fast.c
│ ├── afl-llvm-pass.so.cc
│ └── afl-llvm-rt.o.c
├── qemu_mode/
│ ├── README.qemu
│ ├── build_qemu_support.sh
│ └── patches/
│ ├── afl-qemu-cpu-inl.h
│ ├── cpu-exec.diff
│ ├── elfload.diff
│ ├── syscall.diff
│ └── translate-all.diff
├── test-instr.c
├── testcases/
│ ├── README.testcases
│ ├── _extras/
│ │ ├── gif.dict
│ │ ├── html_tags.dict
│ │ ├── jpeg.dict
│ │ ├── js.dict
│ │ ├── pdf.dict
│ │ ├── png.dict
│ │ ├── sql.dict
│ │ ├── tiff.dict
│ │ ├── webp.dict
│ │ └── xml.dict
│ ├── archives/
│ │ ├── common/
│ │ │ ├── ar/
│ │ │ │ └── small_archive.a
│ │ │ ├── bzip2/
│ │ │ │ └── small_archive.bz2
│ │ │ ├── cab/
│ │ │ │ └── small_archive.cab
│ │ │ ├── compress/
│ │ │ │ └── small_archive.Z
│ │ │ ├── cpio/
│ │ │ │ └── small_archive.cpio
│ │ │ ├── lzo/
│ │ │ │ └── small_archive.lzo
│ │ │ └── xz/
│ │ │ └── small_archive.xz
│ │ └── exotic/
│ │ ├── arj/
│ │ │ └── small_archive.arj
│ │ ├── lha/
│ │ │ └── small_archive.lha
│ │ ├── lrzip/
│ │ │ └── small_archive.lrz
│ │ ├── lzip/
│ │ │ └── small_archive.lz
│ │ ├── lzma/
│ │ │ └── small_archive.lzma
│ │ ├── rzip/
│ │ │ └── small_archive.rz
│ │ └── zoo/
│ │ └── small_archive.zoo
│ ├── images/
│ │ ├── jp2/
│ │ │ └── not_kitty.jp2
│ │ ├── jxr/
│ │ │ └── not_kitty.jxr
│ │ └── tiff/
│ │ └── not_kitty.tiff
│ └── others/
│ ├── elf/
│ │ └── small_exec.elf
│ ├── js/
│ │ └── small_script.js
│ ├── pcap/
│ │ └── small_capture.pcap
│ ├── rtf/
│ │ └── small_document.rtf
│ ├── sql/
│ │ └── simple_queries.sql
│ ├── text/
│ │ └── hello_world.txt
│ └── xml/
│ └── small_document.xml
└── types.h
================================================
FILE CONTENTS
================================================
================================================
FILE: Makefile
================================================
#
# american fuzzy lop - makefile
# -----------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Release identity; VERSION is baked into every binary via -DVERSION below
# and checked again by the 'publish' target.
PROGNAME = afl
VERSION = 1.95b
# Installation layout. Override PREFIX (or DESTDIR at install time) to
# relocate; the paths are also compiled into the binaries via CFLAGS.
PREFIX ?= /usr/local
BIN_PATH = $(PREFIX)/bin
HELPER_PATH = $(PREFIX)/lib/afl
DOC_PATH = $(PREFIX)/share/doc/afl
MISC_PATH = $(PREFIX)/share/afl
# Binaries produced by 'all'. afl-gcc and afl-as additionally get symlinks
# (afl-g++, afl-clang, afl-clang++ and 'as') created by their rules.
PROGS = afl-gcc afl-as afl-fuzz afl-showmap afl-tmin afl-gotcpu
# '?=' lets the user override the optimization flags from the command line;
# the '+=' block appends flags the build itself relies on.
CFLAGS ?= -O3 -funroll-loops
CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
-DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
-DBIN_PATH=\"$(BIN_PATH)\" -DVERSION=\"$(VERSION)\"
# On Linux / GNU systems, link against libdl (needed by the tools).
ifneq "$(filter Linux GNU%,$(shell uname))" ""
LDFLAGS += -ldl
endif
# Pick the wrapper exercised by test_build based on whether $(CC) is clang.
ifeq "$(findstring clang, $(shell $(CC) --version 2>/dev/null))" ""
TEST_CC = afl-gcc
else
TEST_CC = afl-clang
endif
COMM_HDR = alloc-inl.h config.h debug.h types.h
all: test_x86 $(PROGS) test_build all_done
# Sanity check: afl-as only understands x86 assembly, so confirm the host
# compiler can emit it. Set AFL_NOX86=1 to skip (e.g., for LLVM/QEMU mode).
ifndef AFL_NOX86
test_x86:
	@echo "[*] Checking for the ability to compile x86 code..."
	@echo 'main() { __asm__("xorb %al, %al"); }' | $(CC) -w -x c - -o .test || ( echo; echo "Oops, looks like your compiler can't generate x86 code."; echo; echo "You can still try using the LLVM or QEMU mode, but see docs/INSTALL first."; echo "To ignore this error, set AFL_NOX86=1."; echo; exit 1 )
	@rm -f .test
	@echo "[+] Everything seems to be working, ready to compile."
else
test_x86:
	@echo "[!] Note: skipping x86 compilation checks (AFL_NOX86 set)."
endif
afl-gcc: afl-gcc.c $(COMM_HDR) | test_x86
	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
	set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $$i; done
afl-as: afl-as.c afl-as.h $(COMM_HDR) | test_x86
	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
	ln -sf afl-as as
afl-fuzz: afl-fuzz.c $(COMM_HDR) | test_x86
	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-showmap: afl-showmap.c $(COMM_HDR) | test_x86
	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-tmin: afl-tmin.c $(COMM_HDR) | test_x86
	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
afl-gotcpu: afl-gotcpu.c $(COMM_HDR) | test_x86
	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
# Self-test: compile test-instr.c with the wrapper, then confirm that two
# different inputs yield two different coverage maps (instrumentation works).
ifndef AFL_NOX86
test_build: afl-gcc afl-as afl-showmap
	@echo "[*] Testing the CC wrapper and instrumentation output..."
	unset AFL_USE_ASAN AFL_USE_MSAN; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
	echo 0 | ./afl-showmap -m none -q -o .test-instr0 ./test-instr
	echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
	@rm -f test-instr
	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
	@echo "[+] All right, the instrumentation seems to be working!"
else
test_build: afl-gcc afl-as afl-showmap
	@echo "[!] Note: skipping build tests (you may need to use LLVM or QEMU mode)."
endif
all_done: test_build
	@echo "[+] All done! Be sure to review README - it's pretty short and useful."
	@if [ "`uname`" = "Darwin" ]; then printf "\nWARNING: Fuzzing on MacOS X is slow because of the unusually high overhead of\nfork() on this OS. Consider using Linux or *BSD. You can also use VirtualBox\n(virtualbox.org) to put AFL inside a Linux or *BSD VM.\n\n"; fi
	@! tty <&1 >/dev/null || printf "\033[0;30mNOTE: If you can read this, your terminal probably uses white background.\nThis will make the UI hard to read. See docs/status_screen.txt for advice.\033[0m\n" 2>/dev/null
# 'clean' removes files shared between rules, so never run it in parallel
# with anything else.
.NOTPARALLEL: clean
clean:
	rm -f $(PROGS) as afl-g++ afl-clang afl-clang++ *.o *~ a.out core core.[1-9][0-9]* *.stackdump test .test test-instr .test-instr0 .test-instr1 qemu_mode/qemu-2.3.0.tar.bz2 afl-qemu-trace
	rm -rf out_dir qemu_mode/qemu-2.3.0
	$(MAKE) -C llvm_mode clean
# Installation honors DESTDIR for staged installs (packaging).
install: all
	mkdir -p -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH)
	rm -f $${DESTDIR}$(BIN_PATH)/afl-plot.sh
	install -m 755 afl-gcc afl-fuzz afl-showmap afl-plot afl-tmin afl-cmin afl-gotcpu afl-whatsup $${DESTDIR}$(BIN_PATH)
	if [ -f afl-qemu-trace ]; then install -m 755 afl-qemu-trace $${DESTDIR}$(BIN_PATH); fi
	if [ -f afl-clang-fast -a -f afl-llvm-pass.so -a -f afl-llvm-rt.o ]; then set -e; install -m 755 afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 afl-llvm-pass.so afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
	set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/$$i; done
	install -m 755 afl-as $${DESTDIR}$(HELPER_PATH)
	ln -sf afl-as $${DESTDIR}$(HELPER_PATH)/as
	install -m 644 docs/README docs/ChangeLog docs/*.txt $${DESTDIR}$(DOC_PATH)
	cp -r testcases/ $${DESTDIR}$(MISC_PATH)
# Maintainer-only release target; assumes the author's ~/www layout.
publish: clean
	test "`basename $$PWD`" = "afl" || exit 1
	test -f ~/www/afl/releases/$(PROGNAME)-$(VERSION).tgz; if [ "$$?" = "0" ]; then echo; echo "Change program version in Makefile, mmkay?"; echo; exit 1; fi
	cd ..; rm -rf $(PROGNAME)-$(VERSION); cp -pr $(PROGNAME) $(PROGNAME)-$(VERSION); \
	tar -cvz -f ~/www/afl/releases/$(PROGNAME)-$(VERSION).tgz $(PROGNAME)-$(VERSION)
	chmod 644 ~/www/afl/releases/$(PROGNAME)-$(VERSION).tgz
	( cd ~/www/afl/releases/; ln -s -f $(PROGNAME)-$(VERSION).tgz $(PROGNAME)-latest.tgz )
	cat docs/README >~/www/afl/README.txt
	cat docs/status_screen.txt >~/www/afl/status_screen.txt
	cat docs/historical_notes.txt >~/www/afl/historical_notes.txt
	cat docs/technical_details.txt >~/www/afl/technical_details.txt
	cat docs/ChangeLog >~/www/afl/ChangeLog.txt
	cat docs/QuickStartGuide.txt >~/www/afl/QuickStartGuide.txt
	echo -n "$(VERSION)" >~/www/afl/version.txt
================================================
FILE: afl-as.c
================================================
/*
american fuzzy lop - wrapper for GNU as
---------------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
The sole purpose of this wrapper is to preprocess assembly files generated
by GCC / clang and inject the instrumentation bits included from afl-as.h. It
is automatically invoked by the toolchain when compiling programs using
afl-gcc / afl-clang.
Note that it's an explicit non-goal to instrument hand-written assembly,
be it in separate .s files or in __asm__ blocks. The only aspiration this
utility has right now is to be able to skip them gracefully and allow the
compilation process to continue.
That said, see experimental/clang_asm_normalize/ for a solution that may
allow clang users to make things work even with hand-crafted assembly. Just
note that there is no equivalent for GCC.
*/
#define AFL_MAIN
#include "config.h"
#include "types.h"
#include "debug.h"
#include "alloc-inl.h"
#include "afl-as.h"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <ctype.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/time.h>
/* Global state shared by edit_params(), add_instrumentation() and main(). */
static u8** as_params; /* Parameters passed to the real 'as' */
static u8* input_file; /* Originally specified input file */
static u8* modified_file; /* Instrumented file for the real 'as' */
static u8 be_quiet, /* Quiet mode (no stderr output) */
clang_mode, /* Running in clang mode? */
pass_thru, /* Just pass data through? */
just_version; /* Just show version? */
static u32 inst_ratio = 100, /* Instrumentation probability (%) */
as_par_cnt = 1; /* Number of params to 'as' */
/* If we don't find --32 or --64 in the command line, default to
instrumentation for whichever mode we were compiled with. This is not
perfect, but should do the trick for almost all use cases. */
#ifdef __x86_64__
static u8 use_64bit = 1;
#else
static u8 use_64bit = 0;
#ifdef __APPLE__
# error "Sorry, 32-bit Apple platforms are not supported."
#endif /* __APPLE__ */
#endif /* ^__x86_64__ */
/* Examine and modify parameters to pass to 'as'. Note that the file name
is always the last parameter passed by GCC, so we exploit this property
to keep the code simple. */
static void edit_params(int argc, char** argv) {
u8 *tmp_dir = getenv("TMPDIR"), *afl_as = getenv("AFL_AS");
u32 i;
#ifdef __APPLE__
u8 use_clang_as = 0;
/* On MacOS X, the Xcode cctool 'as' driver is a bit stale and does not work
with the code generated by newer versions of clang that are hand-built
by the user. See the thread here: http://goo.gl/HBWDtn.
To work around this, when using clang and running without AFL_AS
specified, we will actually call 'clang -c' instead of 'as -q' to
compile the assembly file.
The tools aren't cmdline-compatible, but at least for now, we can
seemingly get away with this by making only very minor tweaks. Thanks
to Nico Weber for the idea. */
if (clang_mode && !afl_as) {
use_clang_as = 1;
afl_as = getenv("AFL_CC");
if (!afl_as) afl_as = getenv("AFL_CXX");
if (!afl_as) afl_as = "clang";
}
#endif /* __APPLE__ */
/* Although this is not documented, GCC also uses TEMP and TMP when TMPDIR
is not set. We need to check these non-standard variables to properly
handle the pass_thru logic later on. */
if (!tmp_dir) tmp_dir = getenv("TEMP");
if (!tmp_dir) tmp_dir = getenv("TMP");
if (!tmp_dir) tmp_dir = "/tmp";
/* +32 leaves headroom for the params appended below (Apple clang flags,
   the modified file name, and the trailing NULL). */
as_params = ck_alloc((argc + 32) * sizeof(u8*));
as_params[0] = afl_as ? afl_as : (u8*)"as";
as_params[argc] = 0;
/* Copy all original params except argv[0] and the trailing file name,
   tracking --32/--64 to decide which trampoline flavor to inject. */
for (i = 1; i < argc - 1; i++) {
if (!strcmp(argv[i], "--64")) use_64bit = 1;
else if (!strcmp(argv[i], "--32")) use_64bit = 0;
#ifdef __APPLE__
/* The Apple case is a bit different... */
if (!strcmp(argv[i], "-arch") && i + 1 < argc) {
if (!strcmp(argv[i + 1], "x86_64")) use_64bit = 1;
else if (!strcmp(argv[i + 1], "i386"))
FATAL("Sorry, 32-bit Apple platforms are not supported.");
}
/* Strip options that set the preference for a particular upstream
assembler in Xcode. */
if (clang_mode && (!strcmp(argv[i], "-q") || !strcmp(argv[i], "-Q")))
continue;
#endif /* __APPLE__ */
as_params[as_par_cnt++] = argv[i];
}
#ifdef __APPLE__
/* When calling clang as the upstream assembler, append -c -x assembler
and hope for the best. */
if (use_clang_as) {
as_params[as_par_cnt++] = "-c";
as_params[as_par_cnt++] = "-x";
as_params[as_par_cnt++] = "assembler";
}
#endif /* __APPLE__ */
/* GCC always passes the input file as the very last parameter. */
input_file = argv[argc - 1];
if (input_file[0] == '-') {
/* '--version': just record the flag; main() skips instrumentation. */
if (!strcmp(input_file + 1, "-version")) {
just_version = 1;
modified_file = input_file;
goto wrap_things_up;
}
if (input_file[1]) FATAL("Incorrect use (not called through afl-gcc?)");
else input_file = NULL; /* bare '-' means read assembly from stdin */
} else {
/* Check if this looks like a standard invocation as a part of an attempt
to compile a program, rather than using gcc on an ad-hoc .s file in
a format we may not understand. This works around an issue compiling
NSS. */
if (strncmp(input_file, tmp_dir, strlen(tmp_dir)) &&
strncmp(input_file, "/var/tmp/", 9) &&
strncmp(input_file, "/tmp/", 5)) pass_thru = 1;
}
/* Unique temp name for the instrumented copy handed to the real 'as'. */
modified_file = alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(),
(u32)time(NULL));
wrap_things_up:
as_params[as_par_cnt++] = modified_file;
as_params[as_par_cnt] = NULL;
}
/* Process input file, generate modified_file. Insert instrumentation in all
the appropriate places. */
static void add_instrumentation(void) {
static u8 line[MAX_LINE];
FILE* inf;
FILE* outf;
s32 outfd;
u32 ins_lines = 0; /* number of trampolines injected so far */
u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0,
skip_intel = 0, skip_app = 0, instrument_next = 0;
#ifdef __APPLE__
u8* colon_pos;
#endif /* __APPLE__ */
if (input_file) {
inf = fopen(input_file, "r");
if (!inf) PFATAL("Unable to read '%s'", input_file);
} else inf = stdin;
/* O_EXCL: refuse to clobber an existing file with our temp name. */
outfd = open(modified_file, O_WRONLY | O_EXCL | O_CREAT, 0600);
if (outfd < 0) PFATAL("Unable to write to '%s'", modified_file);
outf = fdopen(outfd, "w");
if (!outf) PFATAL("fdopen() failed");
while (fgets(line, MAX_LINE, inf)) {
/* In some cases, we want to defer writing the instrumentation trampoline
until after all the labels, macros, comments, etc. If we're in this
mode, and if the line starts with a tab followed by a character, dump
the trampoline now. */
if (!pass_thru && !skip_intel && !skip_app && !skip_csect && instr_ok &&
instrument_next && line[0] == '\t' && isalpha(line[1])) {
fprintf(outf, use_64bit ? trampoline_fmt_64 : trampoline_fmt_32,
R(MAP_SIZE));
instrument_next = 0;
ins_lines++;
}
/* Output the actual line, call it a day in pass-thru mode. */
fputs(line, outf);
if (pass_thru) continue;
/* All right, this is where the actual fun begins. For one, we only want to
instrument the .text section. So, let's keep track of that in processed
files - and let's set instr_ok accordingly. */
if (line[0] == '\t' && line[1] == '.') {
/* OpenBSD puts jump tables directly inline with the code, which is
a bit annoying. They use a specific format of p2align directives
around them, so we use that as a signal. */
if (!clang_mode && instr_ok && !strncmp(line + 2, "p2align ", 8) &&
isdigit(line[10]) && line[11] == '\n') skip_next_label = 1;
if (!strncmp(line + 2, "text\n", 5) ||
!strncmp(line + 2, "section\t.text", 13) ||
!strncmp(line + 2, "section\t__TEXT,__text", 21) ||
!strncmp(line + 2, "section __TEXT,__text", 21)) {
instr_ok = 1;
continue;
}
if (!strncmp(line + 2, "section\t", 8) ||
!strncmp(line + 2, "section ", 8) ||
!strncmp(line + 2, "bss\n", 4) ||
!strncmp(line + 2, "data\n", 5)) {
instr_ok = 0;
continue;
}
}
/* Detect off-flavor assembly (rare, happens in gdb). When this is
encountered, we set skip_csect until the opposite directive is
seen, and we do not instrument. */
if (strstr(line, ".code")) {
if (strstr(line, ".code32")) skip_csect = use_64bit;
if (strstr(line, ".code64")) skip_csect = !use_64bit;
}
/* Detect syntax changes, as could happen with hand-written assembly.
Skip Intel blocks, resume instrumentation when back to AT&T. */
if (strstr(line, ".intel_syntax")) skip_intel = 1;
if (strstr(line, ".att_syntax")) skip_intel = 0;
/* Detect and skip ad-hoc __asm__ blocks, likewise skipping them. */
if (line[0] == '#' || line[1] == '#') {
if (strstr(line, "#APP")) skip_app = 1;
if (strstr(line, "#NO_APP")) skip_app = 0;
}
/* If we're in the right mood for instrumenting, check for function
names or conditional labels. This is a bit messy, but in essence,
we want to catch:
^main: - function entry point (always instrumented)
^.L0: - GCC branch label
^.LBB0_0: - clang branch label (but only in clang mode)
^\tjnz foo - conditional branches
...but not:
^# BB#0: - clang comments
^ # BB#0: - ditto
^.Ltmp0: - clang non-branch labels
^.LC0 - GCC non-branch labels
^.LBB0_0: - ditto (when in GCC mode)
^\tjmp foo - non-conditional jumps
Additionally, clang and GCC on MacOS X follow a different convention
with no leading dots on labels, hence the weird maze of #ifdefs
later on.
*/
if (skip_intel || skip_app || skip_csect || !instr_ok ||
line[0] == '#' || line[0] == ' ') continue;
/* Conditional branch instruction (jnz, etc). We append the instrumentation
right after the branch (to instrument the not-taken path) and at the
branch destination label (handled later on). */
if (line[0] == '\t') {
/* 'j' but not 'jm' excludes unconditional 'jmp'; R(100) applies the
   AFL_INST_RATIO probability. */
if (line[1] == 'j' && line[2] != 'm' && R(100) < inst_ratio) {
fprintf(outf, use_64bit ? trampoline_fmt_64 : trampoline_fmt_32,
R(MAP_SIZE));
ins_lines++;
}
continue;
}
/* Label of some sort. This may be a branch destination, but we need to
tread carefully and account for several different formatting
conventions. */
#ifdef __APPLE__
/* Apple: L<whatever><digit>: */
if ((colon_pos = strstr(line, ":"))) {
if (line[0] == 'L' && isdigit(*(colon_pos - 1))) {
#else
/* Everybody else: .L<whatever>: */
if (strstr(line, ":")) {
if (line[0] == '.') {
#endif /* __APPLE__ */
/* .L0: or LBB0_0: style jump destination */
#ifdef __APPLE__
/* Apple: L<num> / LBB<num> */
if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3)))
&& R(100) < inst_ratio) {
#else
/* Everybody else: .L<num> / .LBB<num> */
if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3)))
&& R(100) < inst_ratio) {
#endif /* __APPLE__ */
/* An optimization is possible here by adding the code only if the
label is mentioned in the code in contexts other than call / jmp.
That said, this complicates the code by requiring two-pass
processing (messy with stdin), and results in a speed gain
typically under 10%, because compilers are generally pretty good
about not generating spurious intra-function jumps.
We use deferred output chiefly to avoid disrupting
.Lfunc_begin0-style exception handling calculations (a problem on
MacOS X). */
if (!skip_next_label) instrument_next = 1; else skip_next_label = 0;
}
} else {
/* Function label (always instrumented, deferred mode). */
instrument_next = 1;
}
}
}
/* Append the __afl_maybe_log runtime only if we actually injected calls. */
if (ins_lines)
fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
if (input_file) fclose(inf);
fclose(outf);
if (!be_quiet) {
if (!ins_lines) WARNF("No instrumentation targets found%s.",
pass_thru ? " (pass-thru mode)" : "");
else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).",
ins_lines, use_64bit ? "64" : "32",
getenv("AFL_HARDEN") ? "hardened" : "non-hardened",
inst_ratio);
}
}
/* Main entry point: parse the command line built by afl-gcc, rewrite the
   assembly file with instrumentation, then exec the real 'as' on it and
   relay its exit status. */
int main(int argc, char** argv) {

  s32 pid;
  u32 rand_seed;
  int status;
  u8* inst_ratio_str = getenv("AFL_INST_RATIO");

  struct timeval tv;
  struct timezone tz;

  clang_mode = !!getenv(CLANG_ENV_VAR);

  /* Only show the banner when stderr is a TTY and AFL_QUIET is unset. */

  if (isatty(2) && !getenv("AFL_QUIET")) {
    SAYF(cCYA "afl-as " cBRI VERSION cRST " by <lcamtuf@google.com>\n");
  } else be_quiet = 1;

  if (argc < 2) {
    SAYF("\n"
         "This is a helper application for afl-fuzz. It is a wrapper around GNU 'as',\n"
         "executed by the toolchain whenever using afl-gcc or afl-clang. You probably\n"
         "don't want to run this program directly.\n\n"
         "Rarely, when dealing with extremely complex projects, it may be advisable to\n"
         "set AFL_INST_RATIO to a value less than 100 in order to reduce the odds of\n"
         "instrumenting every discovered branch.\n\n");
    exit(1);
  }

  /* Seed the R() macro used to pick branch IDs and apply inst_ratio. */

  gettimeofday(&tv, &tz);
  rand_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
  srandom(rand_seed);

  edit_params(argc, argv);

  if (inst_ratio_str) {
    if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100)
      FATAL("Bad value of AFL_INST_RATIO (must be between 0 and 100)");
  }

  /* Guard against 'as' resolving back to this wrapper forever. */

  if (getenv(AS_LOOP_ENV_VAR))
    FATAL("Endless loop when calling 'as' (remove '.' from your PATH)");

  setenv(AS_LOOP_ENV_VAR, "1", 1);

  /* When compiling with ASAN, we don't have a particularly elegant way to skip
     ASAN-specific branches. But we can probabilistically compensate for
     that... */

  if (getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) inst_ratio /= 3;

  if (!just_version) add_instrumentation();

  if (!(pid = fork())) {
    execvp(as_params[0], (char**)as_params);
    FATAL("Oops, failed to execute '%s' - check your PATH", as_params[0]);
  }

  if (pid < 0) PFATAL("fork() failed");

  if (waitpid(pid, &status, 0) <= 0) PFATAL("waitpid() failed");

  if (!getenv("AFL_KEEP_ASSEMBLY")) unlink(modified_file);

  /* Bug fix: WEXITSTATUS() is only meaningful when WIFEXITED() is true. If
     the real 'as' was killed by a signal, the old code returned garbage and
     could report success (exit code 0) for a failed assembly step. Fail
     loudly instead so the toolchain notices. */

  if (!WIFEXITED(status))
    FATAL("'%s' failed to exit normally (killed by a signal?)", as_params[0]);

  exit(WEXITSTATUS(status));

}
================================================
FILE: afl-as.h
================================================
/*
american fuzzy lop - injectable parts
-------------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This file houses the assembly-level instrumentation injected into fuzzed
programs. The instrumentation stores XORed pairs of data: identifiers of the
currently executing branch and the one that executed immediately before.
TL;DR: the instrumentation does shm_trace_map[cur_loc ^ prev_loc]++
The code is designed for 32-bit and 64-bit x86 systems. Both modes should
work everywhere except for Apple systems. Apple does relocations differently
from everybody else, so since their OSes have been 64-bit for a longer while,
I didn't go through the mental effort of porting the 32-bit code.
In principle, similar code should be easy to inject into any well-behaved
binary-only code (e.g., using DynamoRIO). Conditional jumps offer natural
targets for instrumentation, and should offer comparable probe density.
*/
#ifndef _HAVE_AFL_AS_H
#define _HAVE_AFL_AS_H
#include "config.h"
#include "types.h"
/*
------------------
Performances notes
------------------
Contributions to make this code faster are appreciated! Here are some
rough notes that may help with the task:
- Only the trampoline_fmt and the non-setup __afl_maybe_log code paths are
really worth optimizing; the setup / fork server stuff matters a lot less
and should be mostly just kept readable.
- We're aiming for modern CPUs with out-of-order execution and large
pipelines; the code is mostly follows intuitive, human-readable
instruction ordering, because "textbook" manual reorderings make no
substantial difference.
- Interestingly, instrumented execution isn't a lot faster if we store a
variable pointer to the setup, log, or return routine and then do a reg
call from within trampoline_fmt. It does speed up non-instrumented
execution quite a bit, though, since that path just becomes
push-call-ret-pop.
- There is also not a whole lot to be gained by doing SHM attach at a
fixed address instead of retrieving __afl_area_ptr. Although it allows us
to have a shorter log routine inserted for conditional jumps and jump
labels (for a ~10% perf gain), there is a risk of bumping into other
allocations created by the program or by tools such as ASAN.
- popf is *awfully* slow, which is why we're doing the lahf / sahf +
overflow test trick. Unfortunately, this forces us to taint eax / rax, but
this dependency on a commonly-used register still beats the alternative of
using pushf / popf.
One possible optimization is to avoid touching flags by using a circular
buffer that stores just a sequence of current locations, with the XOR stuff
happening offline. Alas, this doesn't seem to have a huge impact:
https://groups.google.com/d/msg/afl-users/MsajVf4fRLo/2u6t88ntUBIJ
- Preforking one child a bit sooner, and then waiting for the "go" command
from within the child, doesn't offer major performance gains; fork() seems
to be relatively inexpensive these days. Preforking multiple children does
help, but badly breaks the "~1 core per fuzzer" design, making it harder to
scale up. Maybe there is some middle ground.
Perhaps of note: in the 64-bit version for all platforms except for Apple,
the instrumentation is done slightly differently than on 32-bit, with
__afl_prev_loc and __afl_area_ptr being local to the object file (.lcomm),
rather than global (.comm). This is to avoid GOTRELPC lookups in the critical
code path, which AFAICT, are otherwise unavoidable if we want gcc -shared to
work; simple relocations between .bss and .text won't work on most 64-bit
platforms in such a case.
(Fun fact: on Apple systems, .lcomm can segfault the linker.)
The side effect is that state transitions are measured in a somewhat
different way, with previous tuple being recorded separately within the scope
of every .c file. This should have no impact in any practical sense.
Another side effect of this design is that getenv() will be called once per
every .o file when running in non-instrumented mode; and since getenv() tends
to be optimized in funny ways, we need to be very careful to save every
oddball register it may touch.
*/
/* 32-bit trampoline injected before each instrumented location. It is a
   printf() format string: the single %08x is filled in with a random
   location ID (R(MAP_SIZE)) at injection time. Saves edi/edx/ecx/eax on the
   stack, loads the ID into ecx, and calls __afl_maybe_log. */
static const u8* trampoline_fmt_32 =
"\n"
"/* --- AFL TRAMPOLINE (32-BIT) --- */\n"
"\n"
".align 4\n"
"\n"
"leal -16(%%esp), %%esp\n"
"movl %%edi, 0(%%esp)\n"
"movl %%edx, 4(%%esp)\n"
"movl %%ecx, 8(%%esp)\n"
"movl %%eax, 12(%%esp)\n"
"movl $0x%08x, %%ecx\n"
"call __afl_maybe_log\n"
"movl 12(%%esp), %%eax\n"
"movl 8(%%esp), %%ecx\n"
"movl 4(%%esp), %%edx\n"
"movl 0(%%esp), %%edi\n"
"leal 16(%%esp), %%esp\n"
"\n"
"/* --- END --- */\n"
"\n";
/* 64-bit variant of the trampoline; same %08x / __afl_maybe_log contract as
   trampoline_fmt_32. The stack is moved down by 128+24 bytes rather than
   just 24 — presumably to skip the System V x86-64 red zone below %rsp
   (NOTE(review): confirm; the offset is not explained here). */
static const u8* trampoline_fmt_64 =
"\n"
"/* --- AFL TRAMPOLINE (64-BIT) --- */\n"
"\n"
".align 4\n"
"\n"
"leaq -(128+24)(%%rsp), %%rsp\n"
"movq %%rdx, 0(%%rsp)\n"
"movq %%rcx, 8(%%rsp)\n"
"movq %%rax, 16(%%rsp)\n"
"movq $0x%08x, %%rcx\n"
"call __afl_maybe_log\n"
"movq 16(%%rsp), %%rax\n"
"movq 8(%%rsp), %%rcx\n"
"movq 0(%%rsp), %%rdx\n"
"leaq (128+24)(%%rsp), %%rsp\n"
"\n"
"/* --- END --- */\n"
"\n";
/* 32-bit runtime appended once per instrumented file. Implements
   __afl_maybe_log (the map update), lazy SHM attachment via the SHM_ENV_VAR
   environment variable, and the fork server protocol over FORKSRV_FD /
   FORKSRV_FD+1. */
static const u8* main_payload_32 =
"\n"
"/* --- AFL MAIN PAYLOAD (32-BIT) --- */\n"
"\n"
".text\n"
".att_syntax\n"
".code32\n"
".align 8\n"
"\n"
"__afl_maybe_log:\n"
"\n"
" lahf\n"
" seto %al\n"
"\n"
" /* Check if SHM region is already mapped. */\n"
"\n"
" movl __afl_area_ptr, %edx\n"
" testl %edx, %edx\n"
" je __afl_setup\n"
"\n"
"__afl_store:\n"
"\n"
" /* Calculate and store hit for the code location specified in ecx. There\n"
" is a double-XOR way of doing this without tainting another register,\n"
" and we use it on 64-bit systems; but it's slower for 32-bit ones. */\n"
"\n"
#ifndef COVERAGE_ONLY
" movl __afl_prev_loc, %edi\n"
" xorl %ecx, %edi\n"
" shrl $1, %ecx\n"
" movl %ecx, __afl_prev_loc\n"
#endif /* !COVERAGE_ONLY */
"\n"
#ifdef SKIP_COUNTS
" orb $1, (%edx, %edi, 1)\n"
#else
" incb (%edx, %edi, 1)\n"
#endif /* ^SKIP_COUNTS */
"\n"
"__afl_return:\n"
"\n"
" addb $127, %al\n"
" sahf\n"
" ret\n"
"\n"
".align 8\n"
"\n"
"__afl_setup:\n"
"\n"
" /* Do not retry setup if we had previous failures. */\n"
"\n"
" cmpb $0, __afl_setup_failure\n"
" jne __afl_return\n"
"\n"
" /* Map SHM, jumping to __afl_setup_abort if something goes wrong.\n"
" We do not save FPU/MMX/SSE registers here, but hopefully, nobody\n"
" will notice this early in the game. */\n"
"\n"
" pushl %eax\n"
" pushl %ecx\n"
"\n"
" pushl $.AFL_SHM_ENV\n"
" call getenv\n"
" addl $4, %esp\n"
"\n"
" testl %eax, %eax\n"
" je __afl_setup_abort\n"
"\n"
" pushl %eax\n"
" call atoi\n"
" addl $4, %esp\n"
"\n"
" pushl $0 /* shmat flags */\n"
" pushl $0 /* requested addr */\n"
" pushl %eax /* SHM ID */\n"
" call shmat\n"
" addl $12, %esp\n"
"\n"
" cmpl $-1, %eax\n"
" je __afl_setup_abort\n"
"\n"
" /* Store the address of the SHM region. */\n"
"\n"
" movl %eax, __afl_area_ptr\n"
" movl %eax, %edx\n"
"\n"
" popl %ecx\n"
" popl %eax\n"
"\n"
/* Fork server loop: handshake with afl-fuzz, then fork one child per
   input and report PIDs / wait statuses back over the pipe. */
"__afl_forkserver:\n"
"\n"
" /* Enter the fork server mode to avoid the overhead of execve() calls. */\n"
"\n"
" pushl %eax\n"
" pushl %ecx\n"
" pushl %edx\n"
"\n"
" /* Phone home and tell the parent that we're OK. (Note that signals with\n"
" no SA_RESTART will mess it up). If this fails, assume that the fd is\n"
" closed because we were execve()d from an instrumented binary, or because\n"
" the parent doesn't want to use the fork server. */\n"
"\n"
" pushl $4 /* length */\n"
" pushl $__afl_temp /* data */\n"
" pushl $" STRINGIFY((FORKSRV_FD + 1)) " /* file desc */\n"
" call write\n"
" addl $12, %esp\n"
"\n"
" cmpl $4, %eax\n"
" jne __afl_fork_resume\n"
"\n"
"__afl_fork_wait_loop:\n"
"\n"
" /* Wait for parent by reading from the pipe. Abort if read fails. */\n"
"\n"
" pushl $4 /* length */\n"
" pushl $__afl_temp /* data */\n"
" pushl $" STRINGIFY(FORKSRV_FD) " /* file desc */\n"
" call read\n"
" addl $12, %esp\n"
"\n"
" cmpl $4, %eax\n"
" jne __afl_die\n"
"\n"
" /* Once woken up, create a clone of our process. This is an excellent use\n"
" case for syscall(__NR_clone, 0, CLONE_PARENT), but glibc boneheadedly\n"
" caches getpid() results and offers no way to update the value, breaking\n"
" abort(), raise(), and a bunch of other things :-( */\n"
"\n"
" call fork\n"
"\n"
" cmpl $0, %eax\n"
" jl __afl_die\n"
" je __afl_fork_resume\n"
"\n"
" /* In parent process: write PID to pipe, then wait for child. */\n"
"\n"
" movl %eax, __afl_fork_pid\n"
"\n"
" pushl $4 /* length */\n"
" pushl $__afl_fork_pid /* data */\n"
" pushl $" STRINGIFY((FORKSRV_FD + 1)) " /* file desc */\n"
" call write\n"
" addl $12, %esp\n"
"\n"
" pushl $0 /* no flags */\n"
" pushl $__afl_temp /* status */\n"
" pushl __afl_fork_pid /* PID */\n"
" call waitpid\n"
" addl $12, %esp\n"
"\n"
" cmpl $0, %eax\n"
" jle __afl_die\n"
"\n"
" /* Relay wait status to pipe, then loop back. */\n"
"\n"
" pushl $4 /* length */\n"
" pushl $__afl_temp /* data */\n"
" pushl $" STRINGIFY((FORKSRV_FD + 1)) " /* file desc */\n"
" call write\n"
" addl $12, %esp\n"
"\n"
" jmp __afl_fork_wait_loop\n"
"\n"
"__afl_fork_resume:\n"
"\n"
" /* In child process: close fds, resume execution. */\n"
"\n"
" pushl $" STRINGIFY(FORKSRV_FD) "\n"
" call close\n"
"\n"
" pushl $" STRINGIFY((FORKSRV_FD + 1)) "\n"
" call close\n"
"\n"
" addl $8, %esp\n"
"\n"
" popl %edx\n"
" popl %ecx\n"
" popl %eax\n"
" jmp __afl_store\n"
"\n"
"__afl_die:\n"
"\n"
" xorl %eax, %eax\n"
" call _exit\n"
"\n"
"__afl_setup_abort:\n"
"\n"
" /* Record setup failure so that we don't keep calling\n"
" shmget() / shmat() over and over again. */\n"
"\n"
" incb __afl_setup_failure\n"
" popl %ecx\n"
" popl %eax\n"
" jmp __afl_return\n"
"\n"
/* Runtime state: SHM pointer, failure flag, previous location, fork server
   scratch space. */
".AFL_VARS:\n"
"\n"
" .comm __afl_area_ptr, 4, 32\n"
" .comm __afl_setup_failure, 1, 32\n"
#ifndef COVERAGE_ONLY
" .comm __afl_prev_loc, 4, 32\n"
#endif /* !COVERAGE_ONLY */
" .comm __afl_fork_pid, 4, 32\n"
" .comm __afl_temp, 4, 32\n"
"\n"
".AFL_SHM_ENV:\n"
" .asciz \"" SHM_ENV_VAR "\"\n"
"\n"
"/* --- END --- */\n"
"\n";
/* The OpenBSD hack is due to lahf and sahf not being recognized by some
versions of binutils: http://marc.info/?l=openbsd-cvs&m=141636589924400
The Apple code is a bit different when calling libc functions because
they are doing relocations differently from everybody else. We also need
to work around the crash issue with .lcomm and the fact that they don't
recognize .string. */
/* CALL_L64(): emit one 64-bit libc call as a string fragment. Apple
   prefixes C symbols with "_"; everywhere else the call is routed
   through the PLT. */
#ifdef __APPLE__
# define CALL_L64(str) "call _" str "\n"
#else
# define CALL_L64(str) "call " str "@PLT\n"
#endif /* ^__APPLE__ */
/* 64-bit instrumentation support payload. The logging entry point
   __afl_maybe_log expects the code-location ID in %rcx (see the
   __afl_store comment inside the assembly below). */
static const u8* main_payload_64 =
"\n"
"/* --- AFL MAIN PAYLOAD (64-BIT) --- */\n"
"\n"
".text\n"
".att_syntax\n"
".code64\n"
".align 8\n"
"\n"
"__afl_maybe_log:\n"
"\n"
#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9))
" .byte 0x9f /* lahf */\n"
#else
" lahf\n"
#endif /* ^__OpenBSD__, etc */
" seto %al\n"
"\n"
" /* Check if SHM region is already mapped. */\n"
"\n"
" movq __afl_area_ptr(%rip), %rdx\n"
" testq %rdx, %rdx\n"
" je __afl_setup\n"
"\n"
"__afl_store:\n"
"\n"
" /* Calculate and store hit for the code location specified in rcx. */\n"
"\n"
#ifndef COVERAGE_ONLY
" xorq __afl_prev_loc(%rip), %rcx\n"
" xorq %rcx, __afl_prev_loc(%rip)\n"
" shrq $1, __afl_prev_loc(%rip)\n"
#endif /* ^!COVERAGE_ONLY */
"\n"
#ifdef SKIP_COUNTS
" orb $1, (%rdx, %rcx, 1)\n"
#else
" incb (%rdx, %rcx, 1)\n"
#endif /* ^SKIP_COUNTS */
"\n"
"__afl_return:\n"
"\n"
" addb $127, %al\n"
#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9))
" .byte 0x9e /* sahf */\n"
#else
" sahf\n"
#endif /* ^__OpenBSD__, etc */
" ret\n"
"\n"
".align 8\n"
"\n"
"__afl_setup:\n"
"\n"
" /* Do not retry setup if we had previous failures. */\n"
"\n"
" cmpb $0, __afl_setup_failure(%rip)\n"
" jne __afl_return\n"
"\n"
" /* Check out if we have a global pointer on file. */\n"
"\n"
#ifndef __APPLE__
" movq __afl_global_area_ptr@GOTPCREL(%rip), %rdx\n"
" movq (%rdx), %rdx\n"
#else
" movq __afl_global_area_ptr(%rip), %rdx\n"
#endif /* !^__APPLE__ */
" testq %rdx, %rdx\n"
" je __afl_setup_first\n"
"\n"
" movq %rdx, __afl_area_ptr(%rip)\n"
" jmp __afl_store\n"
"\n"
"__afl_setup_first:\n"
"\n"
" /* Save everything that is not yet saved and that may be touched by\n"
" getenv() and several other libcalls we'll be relying on. */\n"
"\n"
" leaq -352(%rsp), %rsp\n"
"\n"
" movq %rax, 0(%rsp)\n"
" movq %rcx, 8(%rsp)\n"
" movq %rdi, 16(%rsp)\n"
" movq %rsi, 32(%rsp)\n"
" movq %r8, 40(%rsp)\n"
" movq %r9, 48(%rsp)\n"
" movq %r10, 56(%rsp)\n"
" movq %r11, 64(%rsp)\n"
"\n"
" movq %xmm0, 96(%rsp)\n"
" movq %xmm1, 112(%rsp)\n"
" movq %xmm2, 128(%rsp)\n"
" movq %xmm3, 144(%rsp)\n"
" movq %xmm4, 160(%rsp)\n"
" movq %xmm5, 176(%rsp)\n"
" movq %xmm6, 192(%rsp)\n"
" movq %xmm7, 208(%rsp)\n"
" movq %xmm8, 224(%rsp)\n"
" movq %xmm9, 240(%rsp)\n"
" movq %xmm10, 256(%rsp)\n"
" movq %xmm11, 272(%rsp)\n"
" movq %xmm12, 288(%rsp)\n"
" movq %xmm13, 304(%rsp)\n"
" movq %xmm14, 320(%rsp)\n"
" movq %xmm15, 336(%rsp)\n"
"\n"
" /* Map SHM, jumping to __afl_setup_abort if something goes wrong. */\n"
"\n"
" /* The 64-bit ABI requires 16-byte stack alignment. We'll keep the\n"
" original stack ptr in the callee-saved r12. */\n"
"\n"
" pushq %r12\n"
" movq %rsp, %r12\n"
" subq $16, %rsp\n"
" andq $0xfffffffffffffff0, %rsp\n"
"\n"
" leaq .AFL_SHM_ENV(%rip), %rdi\n"
CALL_L64("getenv")
"\n"
" testq %rax, %rax\n"
" je __afl_setup_abort\n"
"\n"
" movq %rax, %rdi\n"
CALL_L64("atoi")
"\n"
" xorq %rdx, %rdx /* shmat flags */\n"
" xorq %rsi, %rsi /* requested addr */\n"
" movq %rax, %rdi /* SHM ID */\n"
CALL_L64("shmat")
"\n"
" cmpq $-1, %rax\n"
" je __afl_setup_abort\n"
"\n"
" /* Store the address of the SHM region. */\n"
"\n"
" movq %rax, %rdx\n"
" movq %rax, __afl_area_ptr(%rip)\n"
"\n"
#ifdef __APPLE__
" movq %rax, __afl_global_area_ptr(%rip)\n"
#else
" movq __afl_global_area_ptr@GOTPCREL(%rip), %rdx\n"
" movq %rax, (%rdx)\n"
#endif /* ^__APPLE__ */
" movq %rax, %rdx\n"
"\n"
/* Fork-server loop: parent relays PIDs / wait statuses over the
   FORKSRV_FD pipe pair; child falls through to __afl_fork_resume. */
"__afl_forkserver:\n"
"\n"
" /* Enter the fork server mode to avoid the overhead of execve() calls. We\n"
" push rdx (area ptr) twice to keep stack alignment neat. */\n"
"\n"
" pushq %rdx\n"
" pushq %rdx\n"
"\n"
" /* Phone home and tell the parent that we're OK. (Note that signals with\n"
" no SA_RESTART will mess it up). If this fails, assume that the fd is\n"
" closed because we were execve()d from an instrumented binary, or because\n"
" the parent doesn't want to use the fork server. */\n"
"\n"
" movq $4, %rdx /* length */\n"
" leaq __afl_temp(%rip), %rsi /* data */\n"
" movq $" STRINGIFY((FORKSRV_FD + 1)) ", %rdi /* file desc */\n"
CALL_L64("write")
"\n"
" cmpq $4, %rax\n"
" jne __afl_fork_resume\n"
"\n"
"__afl_fork_wait_loop:\n"
"\n"
" /* Wait for parent by reading from the pipe. Abort if read fails. */\n"
"\n"
" movq $4, %rdx /* length */\n"
" leaq __afl_temp(%rip), %rsi /* data */\n"
" movq $" STRINGIFY(FORKSRV_FD) ", %rdi /* file desc */\n"
CALL_L64("read")
" cmpq $4, %rax\n"
" jne __afl_die\n"
"\n"
" /* Once woken up, create a clone of our process. This is an excellent use\n"
" case for syscall(__NR_clone, 0, CLONE_PARENT), but glibc boneheadedly\n"
" caches getpid() results and offers no way to update the value, breaking\n"
" abort(), raise(), and a bunch of other things :-( */\n"
"\n"
CALL_L64("fork")
" cmpq $0, %rax\n"
" jl __afl_die\n"
" je __afl_fork_resume\n"
"\n"
" /* In parent process: write PID to pipe, then wait for child. */\n"
"\n"
" movl %eax, __afl_fork_pid(%rip)\n"
"\n"
" movq $4, %rdx /* length */\n"
" leaq __afl_fork_pid(%rip), %rsi /* data */\n"
" movq $" STRINGIFY((FORKSRV_FD + 1)) ", %rdi /* file desc */\n"
CALL_L64("write")
"\n"
" movq $0, %rdx /* no flags */\n"
" leaq __afl_temp(%rip), %rsi /* status */\n"
" movq __afl_fork_pid(%rip), %rdi /* PID */\n"
CALL_L64("waitpid")
" cmpq $0, %rax\n"
" jle __afl_die\n"
"\n"
" /* Relay wait status to pipe, then loop back. */\n"
"\n"
" movq $4, %rdx /* length */\n"
" leaq __afl_temp(%rip), %rsi /* data */\n"
" movq $" STRINGIFY((FORKSRV_FD + 1)) ", %rdi /* file desc */\n"
CALL_L64("write")
"\n"
" jmp __afl_fork_wait_loop\n"
"\n"
"__afl_fork_resume:\n"
"\n"
" /* In child process: close fds, resume execution. */\n"
"\n"
" movq $" STRINGIFY(FORKSRV_FD) ", %rdi\n"
CALL_L64("close")
"\n"
" movq $" STRINGIFY((FORKSRV_FD + 1)) ", %rdi\n"
CALL_L64("close")
"\n"
" popq %rdx\n"
" popq %rdx\n"
"\n"
" movq %r12, %rsp\n"
" popq %r12\n"
"\n"
" movq 0(%rsp), %rax\n"
" movq 8(%rsp), %rcx\n"
" movq 16(%rsp), %rdi\n"
" movq 32(%rsp), %rsi\n"
" movq 40(%rsp), %r8\n"
" movq 48(%rsp), %r9\n"
" movq 56(%rsp), %r10\n"
" movq 64(%rsp), %r11\n"
"\n"
" movq 96(%rsp), %xmm0\n"
" movq 112(%rsp), %xmm1\n"
" movq 128(%rsp), %xmm2\n"
" movq 144(%rsp), %xmm3\n"
" movq 160(%rsp), %xmm4\n"
" movq 176(%rsp), %xmm5\n"
" movq 192(%rsp), %xmm6\n"
" movq 208(%rsp), %xmm7\n"
" movq 224(%rsp), %xmm8\n"
" movq 240(%rsp), %xmm9\n"
" movq 256(%rsp), %xmm10\n"
" movq 272(%rsp), %xmm11\n"
" movq 288(%rsp), %xmm12\n"
" movq 304(%rsp), %xmm13\n"
" movq 320(%rsp), %xmm14\n"
" movq 336(%rsp), %xmm15\n"
"\n"
" leaq 352(%rsp), %rsp\n"
"\n"
" jmp __afl_store\n"
"\n"
"__afl_die:\n"
"\n"
" xorq %rax, %rax\n"
CALL_L64("_exit")
"\n"
"__afl_setup_abort:\n"
"\n"
" /* Record setup failure so that we don't keep calling\n"
" shmget() / shmat() over and over again. */\n"
"\n"
" incb __afl_setup_failure(%rip)\n"
"\n"
" movq %r12, %rsp\n"
" popq %r12\n"
"\n"
" movq 0(%rsp), %rax\n"
" movq 8(%rsp), %rcx\n"
" movq 16(%rsp), %rdi\n"
" movq 32(%rsp), %rsi\n"
" movq 40(%rsp), %r8\n"
" movq 48(%rsp), %r9\n"
" movq 56(%rsp), %r10\n"
" movq 64(%rsp), %r11\n"
"\n"
" movq 96(%rsp), %xmm0\n"
" movq 112(%rsp), %xmm1\n"
" movq 128(%rsp), %xmm2\n"
" movq 144(%rsp), %xmm3\n"
" movq 160(%rsp), %xmm4\n"
" movq 176(%rsp), %xmm5\n"
" movq 192(%rsp), %xmm6\n"
" movq 208(%rsp), %xmm7\n"
" movq 224(%rsp), %xmm8\n"
" movq 240(%rsp), %xmm9\n"
" movq 256(%rsp), %xmm10\n"
" movq 272(%rsp), %xmm11\n"
" movq 288(%rsp), %xmm12\n"
" movq 304(%rsp), %xmm13\n"
" movq 320(%rsp), %xmm14\n"
" movq 336(%rsp), %xmm15\n"
"\n"
" leaq 352(%rsp), %rsp\n"
"\n"
" jmp __afl_return\n"
"\n"
/* Zero-initialized runtime storage referenced by the code above.
   Apple needs .comm (its assembler crashes on .lcomm per the note at
   the top of this section). */
".AFL_VARS:\n"
"\n"
#ifdef __APPLE__
" .comm __afl_area_ptr, 8\n"
#ifndef COVERAGE_ONLY
" .comm __afl_prev_loc, 8\n"
#endif /* !COVERAGE_ONLY */
" .comm __afl_fork_pid, 4\n"
" .comm __afl_temp, 4\n"
" .comm __afl_setup_failure, 1\n"
#else
" .lcomm __afl_area_ptr, 8\n"
#ifndef COVERAGE_ONLY
" .lcomm __afl_prev_loc, 8\n"
#endif /* !COVERAGE_ONLY */
" .lcomm __afl_fork_pid, 4\n"
" .lcomm __afl_temp, 4\n"
" .lcomm __afl_setup_failure, 1\n"
#endif /* ^__APPLE__ */
" .comm __afl_global_area_ptr, 8, 8\n"
"\n"
".AFL_SHM_ENV:\n"
" .asciz \"" SHM_ENV_VAR "\"\n"
"\n"
"/* --- END --- */\n"
"\n";
#endif /* !_HAVE_AFL_AS_H */
================================================
FILE: afl-cmin
================================================
#!/usr/bin/env bash
#
# american fuzzy lop - corpus minimization tool
# ---------------------------------------------
#
# Written and maintained by Michal Zalewski <lcamtuf@google.com>
#
# Copyright 2014, 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This tool tries to find the smallest subset of files in the input directory
# that still trigger the full range of instrumentation data points seen in
# the starting corpus. This has two uses:
#
# - Screening large corpora of input files before using them as a seed for
# afl-fuzz. The tool will remove functionally redundant files and likely
# leave you with a much smaller set.
#
# (In this case, you probably also want to consider running afl-tmin on
# the individual files later on to reduce their size.)
#
# - Minimizing the corpus generated organically by afl-fuzz, perhaps when
# planning to feed it to more resource-intensive tools. The tool achieves
# this by removing all entries that used to trigger unique behaviors in the
# past, but have been made obsolete by later finds.
#
# Note that the tool doesn't modify the files themselves. For that, you want
# afl-tmin.
#
# This script must use bash because other shells may have hardcoded limits on
# array sizes.
#
echo "corpus minimization tool for afl-fuzz by <lcamtuf@google.com>"
echo
#########
# SETUP #
#########
# Process command-line options...
# Defaults: 100 MB memory cap, no time limit. Both can be overridden
# via -m / -t below.
MEM_LIMIT=100
TIMEOUT=none
# Clear anything inherited from the environment so that "unset" below
# reliably means "not specified on this invocation".
unset IN_DIR OUT_DIR STDIN_FILE EXTRA_PAR MEM_LIMIT_GIVEN \
AFL_CMIN_CRASHES_ONLY AFL_CMIN_ALLOW_ANY QEMU_MODE
while getopts "+i:o:f:m:t:eQC" opt; do
case "$opt" in
"i")
IN_DIR="$OPTARG"
;;
"o")
OUT_DIR="$OPTARG"
;;
"f")
STDIN_FILE="$OPTARG"
;;
"m")
MEM_LIMIT="$OPTARG"
# Remember that -m was explicit so the -Q branch won't override it.
MEM_LIMIT_GIVEN=1
;;
"t")
TIMEOUT="$OPTARG"
;;
"e")
EXTRA_PAR="$EXTRA_PAR -e"
;;
"C")
# Exported so that the afl-showmap child processes see it too.
export AFL_CMIN_CRASHES_ONLY=1
;;
"Q")
EXTRA_PAR="$EXTRA_PAR -Q"
# QEMU mode needs more memory headroom; raise the default cap only
# if the user did not pass -m themselves.
test "$MEM_LIMIT_GIVEN" = "" && MEM_LIMIT=250
QEMU_MODE=1
;;
"?")
exit 1
;;
esac
done
# Drop the parsed options; "$@" now holds the target command line.
shift $((OPTIND-1))
TARGET_BIN="$1"
# The three parameters -i, -o and the target binary are mandatory;
# print usage to stderr and bail if any is missing.
if [ "$TARGET_BIN" = "" -o "$IN_DIR" = "" -o "$OUT_DIR" = "" ]; then
cat 1>&2 <<_EOF_
Usage: $0 [ options ] -- /path/to/target_app [ ... ]
Required parameters:
-i dir - input directory with the starting corpus
-o dir - output directory for minimized files
Execution control settings:
-f file - location read by the fuzzed program (stdin)
-m megs - memory limit for child process ($MEM_LIMIT MB)
-t msec - run time limit for child process (none)
-Q - use binary-only instrumentation (QEMU mode)
Minimization settings:
-C - keep crashing inputs, reject everything else
-e - solve for edge coverage only, ignore hit counts
For additional tips, please consult docs/README.
_EOF_
exit 1
fi
# Do a sanity check to discourage the use of /tmp, since we can't really
# handle this safely from a shell script.
# Each grep exit status is captured; 0 means the path matched /tmp.
echo "$IN_DIR" | grep -qE '^(/var)?/tmp/'
T1="$?"
echo "$TARGET_BIN" | grep -qE '^(/var)?/tmp/'
T2="$?"
echo "$OUT_DIR" | grep -qE '^(/var)?/tmp/'
T3="$?"
echo "$STDIN_FILE" | grep -qE '^(/var)?/tmp/'
T4="$?"
echo "$PWD" | grep -qE '^(/var)?/tmp/'
T5="$?"
if [ "$T1" = "0" -o "$T2" = "0" -o "$T3" = "0" -o "$T4" = "0" -o "$T5" = "0" ]; then
echo "[-] Error: do not use this script in /tmp or /var/tmp." 1>&2
exit 1
fi
# If @@ is specified, but there's no -f, let's come up with a temporary input
# file name.
TRACE_DIR="$OUT_DIR/.traces"
if [ "$STDIN_FILE" = "" ]; then
if echo "$*" | grep -qF '@@'; then
STDIN_FILE="$TRACE_DIR/.cur_input"
fi
fi
# Check for obvious errors.
# Reject limits low enough to make every execution fail.
if [ ! "$MEM_LIMIT" = "none" ]; then
if [ "$MEM_LIMIT" -lt "5" ]; then
echo "[-] Error: dangerously low memory limit." 1>&2
exit 1
fi
fi
if [ ! "$TIMEOUT" = "none" ]; then
if [ "$TIMEOUT" -lt "10" ]; then
echo "[-] Error: dangerously low timeout." 1>&2
exit 1
fi
fi
# If the target isn't a usable path, try resolving it via $PATH.
if [ ! -f "$TARGET_BIN" -o ! -x "$TARGET_BIN" ]; then
TNEW="`which "$TARGET_BIN" 2>/dev/null`"
if [ ! -f "$TNEW" -o ! -x "$TNEW" ]; then
echo "[-] Error: binary '$TARGET_BIN' not found or not executable." 1>&2
exit 1
fi
TARGET_BIN="$TNEW"
fi
# Unless skipped (or in QEMU mode), verify the binary carries AFL
# instrumentation by looking for the SHM environment variable string.
if [ "$AFL_SKIP_BIN_CHECK" = "" -a "$QEMU_MODE" = "" ]; then
if ! grep -qF "__AFL_SHM_ID" "$TARGET_BIN"; then
echo "[-] Error: binary '$TARGET_BIN' doesn't appear to be instrumented." 1>&2
exit 1
fi
fi
# Validate the input directory, then prepare a pristine output directory.
if [ ! -d "$IN_DIR" ]; then
echo "[-] Error: directory '$IN_DIR' not found." 1>&2
exit 1
fi
# Accept an afl-fuzz output directory directly by descending into queue/.
test -d "$IN_DIR/queue" && IN_DIR="$IN_DIR/queue"
# Remove stale AFL output files from a previous run. Note: -maxdepth is a
# global option and must precede tests such as -name; placing it after
# -name makes GNU find emit a warning on every invocation.
find "$OUT_DIR" -maxdepth 1 -name 'id[:_]*' -exec rm -- {} \; 2>/dev/null
rm -rf "$TRACE_DIR" 2>/dev/null
# rmdir succeeds only on an empty directory; if OUT_DIR still exists
# afterwards, it was non-empty and we refuse to clobber it.
rmdir "$OUT_DIR" 2>/dev/null
if [ -d "$OUT_DIR" ]; then
echo "[-] Error: directory '$OUT_DIR' exists and is not empty - delete it first." 1>&2
exit 1
fi
mkdir -m 700 -p "$TRACE_DIR" || exit 1
if [ ! "$STDIN_FILE" = "" ]; then
rm -f "$STDIN_FILE" || exit 1
touch "$STDIN_FILE" || exit 1
fi
# Locate afl-showmap: next to this script by default, or under AFL_PATH.
if [ "$AFL_PATH" = "" ]; then
SHOWMAP="${0%/afl-cmin}/afl-showmap"
else
SHOWMAP="$AFL_PATH/afl-showmap"
fi
if [ ! -x "$SHOWMAP" ]; then
echo "[-] Error: can't find 'afl-showmap' - please set AFL_PATH." 1>&2
rm -rf "$TRACE_DIR"
exit 1
fi
IN_COUNT=$((`ls -- "$IN_DIR" 2>/dev/null | wc -l`))
if [ "$IN_COUNT" = "0" ]; then
echo "No inputs in the target directory - nothing to be done."
rm -rf "$TRACE_DIR"
exit 1
fi
FIRST_FILE=`ls "$IN_DIR" | head -1`
# Prefer hard links over copies when the filesystem supports them.
if ln "$IN_DIR/$FIRST_FILE" "$TRACE_DIR/.link_test" 2>/dev/null; then
CP_TOOL=ln
else
CP_TOOL=cp
fi
# Make sure that we can actually get anything out of afl-showmap before we
# waste too much time.
echo "[*] Testing the target binary..."
# Two execution modes: feed the test case on stdin, or (with -f / @@)
# copy it to STDIN_FILE and let the target read it from there.
if [ "$STDIN_FILE" = "" ]; then
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$FIRST_FILE"
else
cp "$IN_DIR/$FIRST_FILE" "$STDIN_FILE"
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
fi
# Count the tuples recorded for the first input; zero means the target
# produced no instrumentation output at all.
FIRST_COUNT=$((`grep -c . "$TRACE_DIR/.run_test"`))
if [ "$FIRST_COUNT" -gt "0" ]; then
echo "[+] OK, $FIRST_COUNT tuples recorded."
else
echo "[-] Error: no instrumentation output detected (perhaps crash or timeout)." 1>&2
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 1
fi
# Let's roll!
#############################
# STEP 1: COLLECTING TRACES #
#############################
echo "[*] Obtaining traces for input files in '$IN_DIR'..."
# Run in a subshell so the loop's CUR counter and cwd changes (if any)
# cannot leak into the rest of the script.
(
CUR=0
if [ "$STDIN_FILE" = "" ]; then
while read -r fn; do
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$fn"
done < <(ls "$IN_DIR")
else
while read -r fn; do
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
cp "$IN_DIR/$fn" "$STDIN_FILE"
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
done < <(ls "$IN_DIR")
fi
)
echo
echo
##########################
# STEP 2: SORTING TUPLES #
##########################
# With this out of the way, we sort all tuples by popularity across all
# datasets. The reasoning here is that we won't be able to avoid the files
# that trigger unique tuples anyway, so we will want to start with them and
# see what's left.
echo "[*] Sorting trace sets (this may take a while)..."
# NUL-delimit the file list for xargs so unusual file names survive.
ls "$IN_DIR" | sed "s#^#$TRACE_DIR/#" | tr '\n' '\0' | xargs -0 -n 1 cat | \
sort | uniq -c | sort -n >"$TRACE_DIR/.all_uniq"
TUPLE_COUNT=$((`grep -c . "$TRACE_DIR/.all_uniq"`))
echo "[+] Found $TUPLE_COUNT unique tuples across $IN_COUNT files."
#####################################
# STEP 3: SELECTING CANDIDATE FILES #
#####################################
# The next step is to find the best candidate for each tuple. The "best"
# part is understood simply as the smallest input that includes a particular
# tuple in its trace. Empirical evidence suggests that this produces smaller
# datasets than more involved algorithms that could be still pulled off in
# a shell script.
echo "[*] Finding best candidates for each tuple..."
CUR=0
# ls -rS emits files largest-first... reversed, i.e. smallest last; each
# trace line is tagged with its file name for the sort in step 4.
while read -r fn; do
CUR=$((CUR+1))
printf "\\r Processing file $CUR/$IN_COUNT... "
sed "s#\$# $fn#" "$TRACE_DIR/$fn" >>"$TRACE_DIR/.candidate_list"
done < <(ls -rS "$IN_DIR")
echo
##############################
# STEP 4: LOADING CANDIDATES #
##############################
# At this point, we have a file of tuple-file pairs, sorted by file size
# in ascending order (as a consequence of ls -rS). By doing sort keyed
# only by tuple (-k 1,1) and configured to output only the first line for
# every key (-s -u), we end up with the smallest file for each tuple.
echo "[*] Sorting candidate list (be patient)..."
sort -k1,1 -s -u "$TRACE_DIR/.candidate_list" | \
sed 's/^/BEST_FILE[/;s/ /]="/;s/$/"/' >"$TRACE_DIR/.candidate_script"
if [ ! -s "$TRACE_DIR/.candidate_script" ]; then
echo "[-] Error: no traces obtained from test cases, check syntax!"
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 1
fi
# The sed command converted the sorted list to a shell script that populates
# BEST_FILE[tuple]="fname". Let's load that!
# (This is why bash is required: the generated array can be huge.)
. "$TRACE_DIR/.candidate_script"
##########################
# STEP 5: WRITING OUTPUT #
##########################
# The final trick is to grab the top pick for each tuple, unless said tuple is
# already set due to the inclusion of an earlier candidate; and then put all
# tuples associated with the newly-added file to the "already have" list. The
# loop works from least popular tuples and toward the most common ones.
echo "[*] Processing candidates and writing output files..."
CUR=0
touch "$TRACE_DIR/.already_have"
while read -r cnt tuple; do
CUR=$((CUR+1))
printf "\\r Processing tuple $CUR/$TUPLE_COUNT... "
# If we already have this tuple, skip it.
grep -q "^$tuple\$" "$TRACE_DIR/.already_have" && continue
FN=${BEST_FILE[tuple]}
$CP_TOOL "$IN_DIR/$FN" "$OUT_DIR/$FN"
# Deduplicate the "already have" list only every fifth tuple; a plain
# append is cheaper and correctness does not require uniqueness.
if [ "$((CUR % 5))" = "0" ]; then
sort -u "$TRACE_DIR/$FN" "$TRACE_DIR/.already_have" >"$TRACE_DIR/.tmp"
mv -f "$TRACE_DIR/.tmp" "$TRACE_DIR/.already_have"
else
cat "$TRACE_DIR/$FN" >>"$TRACE_DIR/.already_have"
fi
done <"$TRACE_DIR/.all_uniq"
echo
OUT_COUNT=`ls -- "$OUT_DIR" | wc -l`
if [ "$OUT_COUNT" = "1" ]; then
echo "[!] WARNING: All test cases had the same traces, check syntax!"
fi
echo "[+] Narrowed down to $OUT_COUNT files, saved in '$OUT_DIR'."
echo
test "$AFL_KEEP_TRACES" = "" && rm -rf "$TRACE_DIR"
exit 0
================================================
FILE: afl-fuzz.c
================================================
/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#define AFL_MAIN
#define MESSAGES_TO_STDOUT
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include "config.h"
#include "types.h"
#include "debug.h"
#include "alloc-inl.h"
#include "hash.h"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include <signal.h>
#include <dirent.h>
#include <ctype.h>
#include <fcntl.h>
#include <termios.h>
#include <dlfcn.h>
#include <netdb.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <sys/sendfile.h>
#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
# include <sys/sysctl.h>
#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
/* Lots of globals, but mostly for the status UI and other things where it
really makes no sense to haul them around as function parameters. */
/* --- Paths and identifiers (set during option parsing) --- */
static u8 *in_dir, /* Input directory with test cases */
*out_file, /* File to fuzz, if any */
*out_dir, /* Working & output directory */
*sync_dir, /* Synchronization directory */
*sync_id, /* Fuzzer ID */
*use_banner, /* Display banner */
*in_bitmap, /* Input bitmap */
*doc_path, /* Path to documentation dir */
*target_path, /* Path to target binary */
*orig_cmdline; /* Original command line */
/* --- Numeric execution limits --- */
static u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */
static u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */
static u32 stats_update_freq = 1; /* Stats update frequency (execs) */
/* --- Boolean mode/state flags --- */
static u8 skip_deterministic, /* Skip deterministic stages? */
force_deterministic, /* Force deterministic stages? */
use_splicing, /* Recombine input files? */
dumb_mode, /* Run in non-instrumented mode? */
score_changed, /* Scoring for favorites changed? */
kill_signal, /* Signal that killed the child */
resuming_fuzz, /* Resuming an older fuzzing job? */
timeout_given, /* Specific timeout given? */
not_on_tty, /* stdout is not a tty */
term_too_small, /* terminal dimensions too small */
uses_asan, /* Target uses ASAN? */
no_forkserver, /* Disable forkserver? */
crash_mode, /* Crash mode! Yeah! */
in_place_resume, /* Attempt in-place resume? */
auto_changed, /* Auto-generated tokens changed? */
no_cpu_meter_red, /* Feng shui on the status screen */
no_var_check, /* Don't detect variable behavior */
bitmap_changed = 1, /* Time to update bitmap? */
qemu_mode, /* Running in QEMU mode? */
skip_requested, /* Skip request, via SIGUSR1 */
run_over10m; /* Run time over 10 minutes? */
/* --- Persistent file descriptors and child/forkserver PIDs --- */
static s32 out_fd, /* Persistent fd for out_file */
dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */
dev_null_fd = -1, /* Persistent fd for /dev/null */
fsrv_ctl_fd, /* Fork server control pipe (write) */
fsrv_st_fd; /* Fork server status pipe (read) */
static s32 forksrv_pid, /* PID of the fork server */
child_pid = -1, /* PID of the fuzzed program */
out_dir_fd = -1; /* FD of the lock file */
/* --- Coverage bitmaps --- */
static u8* trace_bits; /* SHM with instrumentation bitmap */
static u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */
virgin_hang[MAP_SIZE], /* Bits we haven't seen in hangs */
virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */
static s32 shm_id; /* ID of the SHM region */
/* volatile: written from signal handlers, read from the main loop */
static volatile u8 stop_soon, /* Ctrl-C pressed? */
clear_screen = 1, /* Window resized? */
child_timed_out; /* Traced process timed out? */
/* --- Queue and progress counters --- */
static u32 queued_paths, /* Total number of queued testcases */
queued_variable, /* Testcases with variable behavior */
queued_at_start, /* Total number of initial inputs */
queued_discovered, /* Items discovered during this run */
queued_imported, /* Items imported via -S */
queued_favored, /* Paths deemed favorable */
queued_with_cov, /* Paths with new coverage bytes */
pending_not_fuzzed, /* Queued but not done yet */
pending_favored, /* Pending favored paths */
cur_skipped_paths, /* Abandoned inputs in cur cycle */
cur_depth, /* Current path depth */
max_depth, /* Max path depth */
useless_at_start, /* Number of useless starting paths */
current_entry, /* Current queue entry ID */
havoc_div = 1; /* Cycle count divisor for havoc */
static u64 total_crashes, /* Total number of crashes */
unique_crashes, /* Crashes with unique signatures */
total_hangs, /* Total number of hangs */
unique_hangs, /* Hangs with unique signatures */
total_execs, /* Total execve() calls */
start_time, /* Unix start time (ms) */
last_path_time, /* Time for most recent path (ms) */
last_crash_time, /* Time for most recent crash (ms) */
last_hang_time, /* Time for most recent hang (ms) */
queue_cycle, /* Queue round counter */
cycles_wo_finds, /* Cycles without any new paths */
trim_execs, /* Execs done to trim input files */
bytes_trim_in, /* Bytes coming into the trimmer */
bytes_trim_out, /* Bytes coming outa the trimmer */
blocks_eff_total, /* Blocks subject to effector maps */
blocks_eff_select; /* Blocks selected as fuzzable */
static u32 subseq_hangs; /* Number of hangs in a row */
/* --- Current fuzzing stage bookkeeping (used by the status screen) --- */
static u8 *stage_name = "init", /* Name of the current fuzz stage */
*stage_short, /* Short stage name */
*syncing_party; /* Currently syncing with... */
static s32 stage_cur, stage_max; /* Stage progression */
static s32 splicing_with = -1; /* Splicing with which test case? */
static u32 syncing_case; /* Syncing with case #... */
static s32 stage_cur_byte, /* Byte offset of current stage op */
stage_cur_val; /* Value used for stage op */
static u8 stage_val_type; /* Value type (STAGE_VAL_*) */
static u64 stage_finds[32], /* Patterns found per fuzz stage */
stage_cycles[32]; /* Execs per fuzz stage */
/* rand_cnt counts down to the next PRNG reseed; see UR() below. */
static u32 rand_cnt; /* Random number counter */
static u64 total_cal_us, /* Total calibration time (us) */
total_cal_cycles; /* Total calibration cycles */
static u64 total_bitmap_size, /* Total bit count for all bitmaps */
total_bitmap_entries; /* Number of bitmaps counted */
static u32 cpu_core_count; /* CPU core count */
static FILE* plot_file; /* Gnuplot output file */
/* Globals for network support */
static struct addrinfo *N_results = NULL, /* for results from getaddrinfo() */
*N_rp = NULL; /* to iterate through N_results[] */
static struct sockaddr_storage N_myaddr; /* to hold send port info */
static struct sockaddr_storage N_server_addr; /* and server (send side) */
static socklen_t N_myaddrlen = sizeof (struct sockaddr_storage);
/* and length of both */
static u32 N_option_specified = 0; /* 1 if a -N option is present */
static u8* N_option_string = 0; /* points to copy of -N option str */
static u32 N_slen = 0; /* length of the -N option string */
static u32 N_valid = 0; /* 1 if valid URL option to -N */
static u32 N_fuzz_client = 0; /* 1 if target is a network client */
static u32 N_myaddr_valid = 0; /* use established conn or addr */
static s32 N_fd; /* for network file descriptor */
static u32 N_timeout_given = 0; /* use delay before network I/O */
static u32 N_exec_tmout = 0; /* network I/O delay in msec */
static struct timespec N_it; /* structure for nanosleep() call */
/* Per-test-case metadata for every item in the fuzzing queue. The queue
   itself is a singly-linked list chained via 'next', with 'next_100'
   providing a 100-entry skip link. */
struct queue_entry {
u8* fname; /* File name for the test case */
u32 len; /* Input length */
u8 cal_failed, /* Calibration failed? */
trim_done, /* Trimmed? */
was_fuzzed, /* Had any fuzzing done yet? */
passed_det, /* Deterministic stages passed? */
has_new_cov, /* Triggers new coverage? */
var_behavior, /* Variable behavior? */
favored, /* Currently favored? */
fs_redundant; /* Marked as redundant in the fs? */
u32 bitmap_size, /* Number of bits set in bitmap */
exec_cksum; /* Checksum of the execution trace */
u64 exec_us, /* Execution time (us) */
handicap, /* Number of queue cycles behind */
depth; /* Path depth */
u8* trace_mini; /* Trace bytes, if kept */
u32 tc_ref; /* Trace bytes ref count */
struct queue_entry *next, /* Next element, if any */
*next_100; /* 100 elements ahead */
};
/* --- Queue pointers and dictionary ("extras") state --- */
static struct queue_entry *queue, /* Fuzzing queue (linked list) */
*queue_cur, /* Current offset within the queue */
*queue_top, /* Top of the list */
*q_prev100; /* Previous 100 marker */
static struct queue_entry*
top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
/* One user-supplied or auto-discovered dictionary token. */
struct extra_data {
u8* data; /* Dictionary token data */
u32 len; /* Dictionary token length */
u32 hit_cnt; /* Use count in the corpus */
};
static struct extra_data* extras; /* Extra tokens to fuzz with */
static u32 extras_cnt; /* Total number of tokens read */
static struct extra_data* a_extras; /* Automatically selected extras */
static u32 a_extras_cnt; /* Total number of tokens available */
/* Optional post-processing hook applied to test-case buffers (NULL when
   unset; presumably loaded from a user library - confirm elsewhere). */
static u8* (*post_handler)(u8* buf, u32* len);
/* Interesting values, as per config.h */
static s8 interesting_8[] = { INTERESTING_8 };
static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
/* Fuzzing stages. NOTE(review): the numbering appears to double as an
   index into stage_finds[]/stage_cycles[] above - confirm at use sites. */
enum {
/* 00 */ STAGE_FLIP1,
/* 01 */ STAGE_FLIP2,
/* 02 */ STAGE_FLIP4,
/* 03 */ STAGE_FLIP8,
/* 04 */ STAGE_FLIP16,
/* 05 */ STAGE_FLIP32,
/* 06 */ STAGE_ARITH8,
/* 07 */ STAGE_ARITH16,
/* 08 */ STAGE_ARITH32,
/* 09 */ STAGE_INTEREST8,
/* 10 */ STAGE_INTEREST16,
/* 11 */ STAGE_INTEREST32,
/* 12 */ STAGE_EXTRAS_UO,
/* 13 */ STAGE_EXTRAS_UI,
/* 14 */ STAGE_EXTRAS_AO,
/* 15 */ STAGE_HAVOC,
/* 16 */ STAGE_SPLICE
};
/* Stage value types (stored in stage_val_type above). */
enum {
/* 00 */ STAGE_VAL_NONE,
/* 01 */ STAGE_VAL_LE,
/* 02 */ STAGE_VAL_BE
};
/* Execution status fault codes */
enum {
/* 00 */ FAULT_NONE,
/* 01 */ FAULT_HANG,
/* 02 */ FAULT_CRASH,
/* 03 */ FAULT_ERROR,
/* 04 */ FAULT_NOINST,
/* 05 */ FAULT_NOBITS
};
/* Return the current unix time, expressed in milliseconds. */

static u64 get_cur_time(void) {

  struct timeval tv;
  struct timezone tz;
  u64 msec;

  gettimeofday(&tv, &tz);

  /* Seconds scaled to ms, plus the sub-second remainder. */
  msec  = tv.tv_sec * 1000ULL;
  msec += tv.tv_usec / 1000;

  return msec;

}
/* Return the current unix time, expressed in microseconds. */

static u64 get_cur_time_us(void) {

  struct timeval tv;
  struct timezone tz;
  u64 usec;

  gettimeofday(&tv, &tz);

  /* Seconds scaled to us, plus the microsecond remainder. */
  usec  = tv.tv_sec * 1000000ULL;
  usec += tv.tv_usec;

  return usec;

}
/* Generate a random number (from 0 to limit - 1). This may
   have slight bias. */
static inline u32 UR(u32 limit) {
/* Countdown-to-reseed: rand_cnt is decremented on every call; when it
   reaches zero (note the post-decrement, which also lets the counter
   wrap harmlessly), two fresh words are read from /dev/urandom - one
   seeds srandom(), the other randomizes the next reseed interval. */
if (!rand_cnt--) {
u32 seed[2];
ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
srandom(seed[0]);
rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
}
/* Modulo reduction is the source of the slight bias noted above. */
return random() % limit;
}
#ifndef IGNORE_FINDS

/* Compare two equally-sized buffers and report, via *first and *last, the
   offsets of the first and last differing bytes (-1 for both if the
   buffers are identical). Used to pick reasonable splice points when
   recombining two input files. */

static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {

  s32 first_diff = -1;   /* earliest mismatching offset, or -1 */
  s32 last_diff  = -1;   /* latest mismatching offset, or -1   */
  u32 i;

  /* Walk both buffers in lockstep by index, updating the bounds on
     every mismatch. */

  for (i = 0; i < len; i++) {

    if (ptr1[i] != ptr2[i]) {

      if (first_diff < 0) first_diff = i;
      last_diff = i;

    }

  }

  *first = first_diff;
  *last  = last_diff;

}

#endif /* !IGNORE_FINDS */
/* Describe integer. Uses 12 cyclic static buffers for return values. The value
   returned should be five characters or less for all the integers we reasonably
   expect to see. */
static u8* DI(u64 val) {
static u8 tmp[12][16];
static u8 cur;
/* Rotate through the 12 buffers so several DI() results can coexist
   within a single printf() call. */
cur = (cur + 1) % 12;
/* Try successive magnitude ranges; print with the matching suffix and
   return as soon as one fits. NOTE: the macro is deliberately left
   defined here - DMS() below reuses it and performs the #undef. */
#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
if (val < (_divisor) * (_limit_mult)) { \
sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
return tmp[cur]; \
} \
} while (0)
/* 0-9999 */
CHK_FORMAT(1, 10000, "%llu", u64);
/* 10.0k - 99.9k */
CHK_FORMAT(1000, 99.95, "%0.01fk", double);
/* 100k - 999k */
CHK_FORMAT(1000, 1000, "%lluk", u64);
/* 1.00M - 9.99M */
CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);
/* 10.0M - 99.9M */
CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);
/* 100M - 999M */
CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);
/* 1.00G - 9.99G */
CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);
/* 10.0G - 99.9G */
CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);
/* 100G - 999G */
CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);
/* 1.00T - 9.99T */
CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);
/* 10.0T - 99.9T */
CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);
/* 100T+ */
strcpy(tmp[cur], "infty");
return tmp[cur];
}
/* Describe float. Similar to the above, except with a single
   static buffer. Values of 999.95 and up fall through to DI(). */

static u8* DF(double val) {

  static u8 tmp[16];

  /* Small values get two decimal places, mid-range values get one. */

  if (val < 999.95) {

    sprintf(tmp, (val < 99.995) ? "%0.02f" : "%0.01f", val);
    return tmp;

  }

  /* Large values: hand off to the integer formatter. */

  return DI((u64)val);

}
/* Describe integer as memory size. Same 12-buffer rotation as DI(); relies
   on the CHK_FORMAT macro defined in DI() above and #undef's it here. */

static u8* DMS(u64 val) {

  static u8 tmp[12][16];
  static u8 cur;

  cur = (cur + 1) % 12;

  /* 0-9999 */
  CHK_FORMAT(1, 10000, "%llu B", u64);

  /* 10.0k - 99.9k */
  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);

  /* 100k - 999k */
  CHK_FORMAT(1024, 1000, "%llu kB", u64);

  /* 1.00M - 9.99M */
  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);

  /* 10.0M - 99.9M */
  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);

  /* 100M - 999M */
  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);

  /* 1.00G - 9.99G */
  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);

  /* 10.0G - 99.9G */
  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);

  /* 100G - 999G */
  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);

  /* 1.00T - 9.99T */
  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);

  /* 10.0T - 99.9T */
  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);

#undef CHK_FORMAT

  /* 100T+ */
  strcpy(tmp[cur], "infty");
  return tmp[cur];

}
/* Describe time delta between two millisecond timestamps. Returns one
   static buffer, 34 chars or less. */

static u8* DTD(u64 cur_ms, u64 event_ms) {

  static u8 tmp[64];
  u64 delta_s;
  s32 t_d, t_h, t_m, t_s;

  if (!event_ms) return "none seen yet";

  /* Convert to whole seconds once, then peel off each unit. */
  delta_s = (cur_ms - event_ms) / 1000;

  t_s = delta_s % 60;
  t_m = (delta_s / 60) % 60;
  t_h = (delta_s / 3600) % 24;
  t_d = delta_s / 86400;

  sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s);
  return tmp;

}
/* Mark deterministic checks as done for a particular queue entry. We use the
   .state file to avoid repeating deterministic fuzzing when resuming aborted
   scans. */

static void mark_as_det_done(struct queue_entry* q) {

  u8* base = strrchr(q->fname, '/') + 1;
  u8* fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, base);
  s32 fd;

  /* An empty marker file is all the state we need. */
  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
  if (fd < 0) PFATAL("Unable to create '%s'", fn);
  close(fd);

  ck_free(fn);

  q->passed_det = 1;

}
/* Mark as variable. Create symlinks if possible to make it easier to examine
   the files. */

static void mark_as_variable(struct queue_entry* q) {

  u8* base = strrchr(q->fname, '/') + 1;
  u8* sym_target = alloc_printf("../../%s", base);
  u8* sym_path = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, base);

  if (symlink(sym_target, sym_path)) {

    /* No symlink support (or it failed) — fall back to an empty marker. */
    s32 fd = open(sym_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", sym_path);
    close(fd);

  }

  ck_free(sym_target);
  ck_free(sym_path);

  q->var_behavior = 1;

}
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
   but may be useful for post-processing datasets. */

static void mark_as_redundant(struct queue_entry* q, u8 state) {

  u8* marker;

  /* No-op if the on-disk marker already matches. */
  if (state == q->fs_redundant) return;

  q->fs_redundant = state;

  marker = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir,
                        strrchr(q->fname, '/') + 1);

  if (state) {

    s32 fd = open(marker, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", marker);
    close(fd);

  } else {

    if (unlink(marker)) PFATAL("Unable to remove '%s'", marker);

  }

  ck_free(marker);

}
/* Append new test case to the queue. Takes ownership of fname. */

static void add_to_queue(u8* fname, u32 len, u8 passed_det) {

  struct queue_entry* entry = ck_alloc(sizeof(struct queue_entry));

  entry->fname = fname;
  entry->len = len;
  entry->depth = cur_depth + 1;
  entry->passed_det = passed_det;

  if (entry->depth > max_depth) max_depth = entry->depth;

  /* Link onto the singly-linked queue; first entry seeds all three heads. */
  if (!queue_top) {

    q_prev100 = queue = queue_top = entry;

  } else {

    queue_top->next = entry;
    queue_top = entry;

  }

  queued_paths++;
  pending_not_fuzzed++;

  /* Maintain the next_100 shortcut chain used to skip ahead quickly. */
  if (!(queued_paths % 100)) {

    q_prev100->next_100 = entry;
    q_prev100 = entry;

  }

  last_path_time = get_cur_time();

}
/* Destroy the entire queue, releasing every entry and its buffers. */

static void destroy_queue(void) {

  struct queue_entry* cur = queue;

  while (cur) {

    struct queue_entry* nxt = cur->next;

    ck_free(cur->fname);
    ck_free(cur->trace_mini);
    ck_free(cur);

    cur = nxt;

  }

}
/* Write bitmap to file. The bitmap is useful mostly for the secret
   -B option, to focus a separate fuzzing session on a particular
   interesting input without rediscovering all the others. */

static void write_bitmap(void) {

  s32 fd;
  u8* fname;

  /* Skip the disk write unless has_new_bits() touched virgin_bits. */
  if (!bitmap_changed) return;
  bitmap_changed = 0;

  fname = alloc_printf("%s/fuzz_bitmap", out_dir);

  fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
  if (fd < 0) PFATAL("Unable to open '%s'", fname);

  ck_write(fd, virgin_bits, MAP_SIZE, fname);

  close(fd);
  ck_free(fname);

}
/* Read bitmap from file, loading it into virgin_bits. This is for the
   -B option again. */

static void read_bitmap(u8* fname) {

  s32 fd;

  fd = open(fname, O_RDONLY);
  if (fd < 0) PFATAL("Unable to open '%s'", fname);

  ck_read(fd, virgin_bits, MAP_SIZE, fname);

  close(fd);

}
/* Check if the current execution path brings anything new to the table.
   Update virgin bits to reflect the finds. Returns 1 if the only change is
   the hit-count for a particular tuple; 2 if there are new tuples seen.
   Updates the map, so subsequent calls will always return 0.

   This function is called after every exec() on a fairly large buffer, so
   it needs to be fast. We do this in 32-bit and 64-bit flavors. */

/* Mask selecting byte _b within a 64-bit (FFL) or 32-bit (FF) word. */

#define FFL(_b) (0xffULL << ((_b) << 3))
#define FF(_b) (0xff << ((_b) << 3))

static inline u8 has_new_bits(u8* virgin_map) {

#ifdef __x86_64__

  /* Walk the maps one machine word at a time. */
  u64* current = (u64*)trace_bits;
  u64* virgin = (u64*)virgin_map;

  u32 i = (MAP_SIZE >> 3);

#else

  u32* current = (u32*)trace_bits;
  u32* virgin = (u32*)virgin_map;

  u32 i = (MAP_SIZE >> 2);

#endif /* ^__x86_64__ */

  u8 ret = 0;

  while (i--) {

#ifdef __x86_64__

    u64 cur = *current;
    u64 vir = *virgin;

#else

    u32 cur = *current;
    u32 vir = *virgin;

#endif /* ^__x86_64__ */

    /* Optimize for *current == ~*virgin, since this will almost always be the
       case. */

    if (cur & vir) {

      if (ret < 2) {

        /* This trace did not have any new bytes yet; see if there's any
           current[] byte that is non-zero when virgin[] is 0xff. A still-0xff
           virgin byte means the tuple has never been hit at all -> ret = 2;
           otherwise only the hit count bucket changed -> ret = 1. */

#ifdef __x86_64__

        if (((cur & FFL(0)) && (vir & FFL(0)) == FFL(0)) ||
            ((cur & FFL(1)) && (vir & FFL(1)) == FFL(1)) ||
            ((cur & FFL(2)) && (vir & FFL(2)) == FFL(2)) ||
            ((cur & FFL(3)) && (vir & FFL(3)) == FFL(3)) ||
            ((cur & FFL(4)) && (vir & FFL(4)) == FFL(4)) ||
            ((cur & FFL(5)) && (vir & FFL(5)) == FFL(5)) ||
            ((cur & FFL(6)) && (vir & FFL(6)) == FFL(6)) ||
            ((cur & FFL(7)) && (vir & FFL(7)) == FFL(7))) ret = 2;
        else ret = 1;

#else

        if (((cur & FF(0)) && (vir & FF(0)) == FF(0)) ||
            ((cur & FF(1)) && (vir & FF(1)) == FF(1)) ||
            ((cur & FF(2)) && (vir & FF(2)) == FF(2)) ||
            ((cur & FF(3)) && (vir & FF(3)) == FF(3))) ret = 2;
        else ret = 1;

#endif /* ^__x86_64__ */

      }

      /* Clear the bits we just saw so future calls won't report them again. */
      *virgin = vir & ~cur;

    }

    current++;
    virgin++;

  }

  /* Flag the on-disk bitmap as stale only for the main virgin map (the
     crash/hang maps are not persisted by write_bitmap()). */
  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;

  return ret;

}
/* Count the number of bits set in the provided bitmap. Used for the status
   screen several times every second, does not have to be fast. */

static u32 count_bits(u8* mem) {

  u32* words = (u32*)mem;
  u32 total = 0;
  u32 idx;

  for (idx = 0; idx < (MAP_SIZE >> 2); idx++) {

    u32 w = words[idx];

    /* This gets called on the inverse, virgin bitmap; optimize for sparse
       data by short-circuiting all-ones words. */

    if (w == 0xffffffff) {
      total += 32;
      continue;
    }

    /* Classic SWAR population count. */
    w -= ((w >> 1) & 0x55555555);
    w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
    total += (((w + (w >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;

  }

  return total;

}
/* Count the number of non-zero bytes set in the bitmap. Called fairly
   sporadically, mostly to update the status screen or calibrate and examine
   confirmed new paths. */

static u32 count_bytes(u8* mem) {

  u32* words = (u32*)mem;
  u32 total = 0;
  u32 idx;

  for (idx = 0; idx < (MAP_SIZE >> 2); idx++) {

    u32 w = words[idx];

    if (!w) continue;

    /* Test each of the four byte lanes individually. */
    if (w & FF(0)) total++;
    if (w & FF(1)) total++;
    if (w & FF(2)) total++;
    if (w & FF(3)) total++;

  }

  return total;

}
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
   status screen, several calls per second or so. */

static u32 count_non_255_bytes(u8* mem) {

  u32* words = (u32*)mem;
  u32 total = 0;
  u32 idx;

  for (idx = 0; idx < (MAP_SIZE >> 2); idx++) {

    u32 w = words[idx];

    /* This is called on the virgin bitmap, so optimize for the most likely
       case: a fully untouched (all-0xff) word. */

    if (w == 0xffffffff) continue;

    if ((w & FF(0)) != FF(0)) total++;
    if ((w & FF(1)) != FF(1)) total++;
    if ((w & FF(2)) != FF(2)) total++;
    if ((w & FF(3)) != FF(3)) total++;

  }

  return total;

}
/* Destructively simplify trace by eliminating hit count information
   and replacing it with 0x80 or 0x01 depending on whether the tuple
   is hit or not. Called on every new crash or hang, should be
   reasonably fast. */

/* AREP* macros expand to N comma-separated copies of _sym, used to build
   the 256-entry lookup tables below without 256 literal lines. They are
   also reused by count_class_lookup[] further down. */

#define AREP4(_sym)   (_sym), (_sym), (_sym), (_sym)
#define AREP8(_sym)   AREP4(_sym), AREP4(_sym)
#define AREP16(_sym)  AREP8(_sym), AREP8(_sym)
#define AREP32(_sym)  AREP16(_sym), AREP16(_sym)
#define AREP64(_sym)  AREP32(_sym), AREP32(_sym)
#define AREP128(_sym) AREP64(_sym), AREP64(_sym)

/* Maps a raw hit count byte to 1 (index 0: never hit) or 128 (any hit). */

static u8 simplify_lookup[256] = {
  /*    4 */ 1, 128, 128, 128,
  /*   +4 */ AREP4(128),
  /*   +8 */ AREP8(128),
  /*  +16 */ AREP16(128),
  /*  +32 */ AREP32(128),
  /*  +64 */ AREP64(128),
  /* +128 */ AREP128(128)
};
#ifdef __x86_64__

/* Collapse each trace byte to hit (0x80) / not hit (0x01) via
   simplify_lookup[], processing eight bytes (one u64) per step. */

static void simplify_trace(u64* mem) {

  u32 left = MAP_SIZE >> 3;

  while (left--) {

    /* Optimize for sparse bitmaps: an all-zero word maps straight to
       eight 0x01 bytes without any table lookups. */

    if (*mem) {

      u8* bytes = (u8*)mem;
      u32 b;

      for (b = 0; b < 8; b++) bytes[b] = simplify_lookup[bytes[b]];

    } else *mem = 0x0101010101010101ULL;

    mem++;

  }

}

#else

/* 32-bit flavor: same idea, four bytes (one u32) per step. */

static void simplify_trace(u32* mem) {

  u32 left = MAP_SIZE >> 2;

  while (left--) {

    /* Optimize for sparse bitmaps. */

    if (*mem) {

      u8* bytes = (u8*)mem;
      u32 b;

      for (b = 0; b < 4; b++) bytes[b] = simplify_lookup[bytes[b]];

    } else *mem = 0x01010101;

    mem++;

  }

}

#endif /* ^__x86_64__ */
/* Destructively classify execution counts in a trace. This is used as a
   preprocessing step for any newly acquired traces. Called on every exec,
   must be fast. */

/* Buckets raw hit counts into power-of-two-ish classes (1, 2, 3, 4-7,
   8-15, 16-31, 32-127, 128+) so that minor count jitter between runs
   does not register as a new path. */

static u8 count_class_lookup[256] = {

  /* 0 - 3:       4 */ 0, 1, 2, 4,
  /* 4 - 7:      +4 */ AREP4(8),
  /* 8 - 15:     +8 */ AREP8(16),
  /* 16 - 31:   +16 */ AREP16(32),
  /* 32 - 127:  +96 */ AREP64(64), AREP32(64),
  /* 128+:     +128 */ AREP128(128)

};
#ifdef __x86_64__

/* Bucket every trace byte through count_class_lookup[], eight bytes
   (one u64) at a time. */

static inline void classify_counts(u64* mem) {

  u32 left = MAP_SIZE >> 3;

  while (left--) {

    /* Optimize for sparse bitmaps: skip all-zero words entirely
       (zero maps to zero anyway). */

    if (*mem) {

      u8* bytes = (u8*)mem;
      u32 b;

      for (b = 0; b < 8; b++) bytes[b] = count_class_lookup[bytes[b]];

    }

    mem++;

  }

}

#else

/* 32-bit flavor: four bytes (one u32) at a time. */

static inline void classify_counts(u32* mem) {

  u32 left = MAP_SIZE >> 2;

  while (left--) {

    /* Optimize for sparse bitmaps. */

    if (*mem) {

      u8* bytes = (u8*)mem;
      u32 b;

      for (b = 0; b < 4; b++) bytes[b] = count_class_lookup[bytes[b]];

    }

    mem++;

  }

}

#endif /* ^__x86_64__ */
/* Get rid of shared memory (atexit handler). Best-effort: the return value
   is deliberately ignored since we are on our way out anyway. */

static void remove_shm(void) {

  shmctl(shm_id, IPC_RMID, NULL);

}
/* Compact trace bytes into a smaller bitmap (one bit per tuple). We
   effectively just drop the count information here. This is called only
   sporadically, for some new paths. */

static void minimize_bits(u8* dst, u8* src) {

  u32 i;

  for (i = 0; i < MAP_SIZE; i++)
    if (src[i]) dst[i >> 3] |= 1 << (i & 7);

}
/* When we bump into a new path, we call this to see if the path appears
   more "favorable" than any of the existing ones. The purpose of the
   "favorables" is to have a minimal set of paths that trigger all the bits
   seen in the bitmap so far, and focus on fuzzing them at the expense of
   the rest.

   The first step of the process is to maintain a list of top_rated[] entries
   for every byte in the bitmap. We win that slot if there is no previous
   contender, or if the contender has a more favorable speed x size factor. */

static void update_bitmap_score(struct queue_entry* q) {

  u32 i;

  /* Lower is better: fast-executing AND small test cases win slots. */
  u64 fav_factor = q->exec_us * q->len;

  /* For every byte set in trace_bits[], see if there is a previous winner,
     and how it compares to us. */

  for (i = 0; i < MAP_SIZE; i++)

    if (trace_bits[i]) {

      if (top_rated[i]) {

        /* Faster-executing or smaller test cases are favored. */

        if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;

        /* Looks like we're going to win. Decrease ref count for the
           previous winner, discard its trace_bits[] if necessary
           (trace_mini is only kept alive while the entry holds at
           least one top_rated[] slot). */

        if (!--top_rated[i]->tc_ref) {
          ck_free(top_rated[i]->trace_mini);
          top_rated[i]->trace_mini = 0;
        }

      }

      /* Insert ourselves as the new winner. */

      top_rated[i] = q;
      q->tc_ref++;

      /* Lazily cache a compact one-bit-per-tuple copy of the current
         trace; cull_queue() consumes it later. */

      if (!q->trace_mini) {
        q->trace_mini = ck_alloc(MAP_SIZE >> 3);
        minimize_bits(q->trace_mini, trace_bits);
      }

      score_changed = 1;

    }

}
/* The second part of the mechanism discussed above is a routine that
   goes over top_rated[] entries, and then sequentially grabs winners for
   previously-unseen bytes (temp_v) and marks them as favored, at least
   until the next run. The favored entries are given more air time during
   all fuzzing steps. This is a greedy approximation of a minimum set
   cover over the bitmap. */

static void cull_queue(void) {

  struct queue_entry* q;

  /* Bitmask of map bytes not yet covered by any favored entry; one bit
     per tuple, same layout as trace_mini. */
  static u8 temp_v[MAP_SIZE >> 3];

  u32 i;

  /* Nothing to do unless update_bitmap_score() changed a slot. */
  if (dumb_mode || !score_changed) return;

  score_changed = 0;

  memset(temp_v, 255, MAP_SIZE >> 3);

  queued_favored  = 0;
  pending_favored = 0;

  /* Reset all favored flags before re-deriving them from scratch. */

  q = queue;

  while (q) {
    q->favored = 0;
    q = q->next;
  }

  /* Let's see if anything in the bitmap isn't captured in temp_v.
     If yes, and if it has a top_rated[] contender, let's use it. */

  for (i = 0; i < MAP_SIZE; i++)
    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {

      u32 j = MAP_SIZE >> 3;

      /* Remove all bits belonging to the current entry from temp_v. */

      while (j--)
        if (top_rated[i]->trace_mini[j])
          temp_v[j] &= ~top_rated[i]->trace_mini[j];

      top_rated[i]->favored = 1;
      queued_favored++;

      if (!top_rated[i]->was_fuzzed) pending_favored++;

    }

  /* Sync the on-disk redundant_edges markers with the new favored set. */

  q = queue;

  while (q) {
    mark_as_redundant(q, !q->favored);
    q = q->next;
  }

}
/* Configure shared memory and virgin_bits. This is called at startup. */

static void setup_shm(void) {

  u8* shm_str;

  /* Preserve a bitmap loaded via -B; otherwise start fully virgin. */
  if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);

  memset(virgin_hang, 255, MAP_SIZE);
  memset(virgin_crash, 255, MAP_SIZE);

  shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);

  if (shm_id < 0) PFATAL("shmget() failed");

  atexit(remove_shm);

  shm_str = alloc_printf("%d", shm_id);

  /* If somebody is asking us to fuzz instrumented binaries in dumb mode,
     we don't want them to detect instrumentation, since we won't be sending
     fork server commands. This should be replaced with better auto-detection
     later on, perhaps? */

  if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1);

  ck_free(shm_str);

  trace_bits = shmat(shm_id, NULL, 0);

  /* Bug fix: shmat() reports failure by returning (void *)-1, not NULL,
     so the old `if (!trace_bits)` test could never fire and a failed
     attach would be dereferenced later. */

  if (trace_bits == (void *)-1) PFATAL("shmat() failed");

}
/* Load postprocessor, if available. Looks up AFL_POST_LIBRARY and, when
   set, dlopen()s it and resolves the afl_postprocess symbol. */

static void setup_post(void) {

  u8* lib_path = getenv("AFL_POST_LIBRARY");
  void* handle;
  u32 test_len = 6;

  if (!lib_path) return;

  ACTF("Loading postprocessor from '%s'...", lib_path);

  handle = dlopen(lib_path, RTLD_NOW);
  if (!handle) FATAL("%s", dlerror());

  post_handler = dlsym(handle, "afl_postprocess");
  if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");

  /* Do a quick test. It's better to segfault now than later =) */

  post_handler("hello", &test_len);

  OKF("Postprocessor installed successfully.");

}
/* Read all testcases from the input directory, then queue them for testing.
   Called at startup. */

static void read_testcases(void) {

  struct dirent **nl;
  s32 nl_cnt;
  u32 i;
  u8* fn;

  /* Auto-detect non-in-place resumption attempts: if the user pointed -i
     at a previous output directory, descend into its queue/ subdir. */

  fn = alloc_printf("%s/queue", in_dir);
  if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn);

  ACTF("Scanning '%s'...", in_dir);

  /* We use scandir() + alphasort() rather than readdir() because otherwise,
     the ordering of test cases would vary somewhat randomly and would be
     difficult to control. */

  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);

  if (nl_cnt < 0) {

    if (errno == ENOENT || errno == ENOTDIR)

      SAYF("\n" cLRD "[-] " cRST
           "The input directory does not seem to be valid - try again. The fuzzer needs\n"
           "    one or more test case to start with - ideally, a small file under 1 kB\n"
           "    or so. The cases must be stored as regular files directly in the input\n"
           "    directory.\n");

    PFATAL("Unable to open '%s'", in_dir);

  }

  for (i = 0; i < nl_cnt; i++) {

    struct stat st;

    u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
    u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);

    u8  passed_det = 0;

    /* scandir() entries come from plain malloc(), so plain free() — the
       ck_* allocator does not track them. */
    free(nl[i]); /* not tracked */

    if (lstat(fn, &st) || access(fn, R_OK))
      PFATAL("Unable to access '%s'", fn);

    /* This also takes care of . and .. */

    if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) {

      ck_free(fn);
      ck_free(dfn);
      continue;

    }

    if (st.st_size > MAX_FILE)
      FATAL("Test case '%s' is too big (%s, limit is %s)", fn,
            DMS(st.st_size), DMS(MAX_FILE));

    /* Check for metadata that indicates that deterministic fuzzing
       is complete for this entry. We don't want to repeat deterministic
       fuzzing when resuming aborted scans, because it would be pointless
       and probably very time-consuming. */

    if (!access(dfn, F_OK)) passed_det = 1;
    ck_free(dfn);

    /* add_to_queue() takes ownership of fn; do not free it here. */
    add_to_queue(fn, st.st_size, passed_det);

  }

  free(nl); /* not tracked */

  if (!queued_paths) {

    SAYF("\n" cLRD "[-] " cRST
         "Looks like there are no valid test cases in the input directory! The fuzzer\n"
         "    needs one or more test case to start with - ideally, a small file under\n"
         "    1 kB or so. The cases must be stored as regular files directly in the\n"
         "    input directory.\n");

    FATAL("No usable test cases in '%s'", in_dir);

  }

  last_path_time = 0;
  queued_at_start = queued_paths;

}
/* Helper function for load_extras. */
static int compare_extras_len(const void* p1, const void* p2) {
struct extra_data *e1 = (struct extra_data*)p1,
*e2 = (struct extra_data*)p2;
return e1->len - e2->len;
}
static int compare_extras_use_d(const void* p1, const void* p2) {
struct extra_data *e1 = (struct extra_data*)p1,
*e2 = (struct extra_data*)p2;
return e2->hit_cnt - e1->hit_cnt;
}
/* Read extras from a file in name="value" (AFL dictionary) format and
   append them to extras[]. Keywords above dict_level (given via "name@N")
   are skipped. Updates *min_len / *max_len with the smallest and largest
   keyword seen. Sorting by size happens in the caller. */

static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
                             u32 dict_level) {

  FILE* f;
  u8  buf[MAX_LINE];
  u8  *lptr;
  u32 cur_line = 0;

  f = fopen(fname, "r");

  if (!f) PFATAL("Unable to open '%s'", fname);

  while ((lptr = fgets(buf, MAX_LINE, f))) {

    u8 *rptr, *wptr;
    u32 klen = 0;

    cur_line++;

    /* Trim on left and right. */

    while (isspace(*lptr)) lptr++;

    rptr = lptr + strlen(lptr) - 1;
    while (rptr >= lptr && isspace(*rptr)) rptr--;
    rptr++;
    *rptr = 0;

    /* Skip empty lines and comments. */

    if (!*lptr || *lptr == '#') continue;

    /* All other lines must end with '"', which we can consume. */

    rptr--;

    if (rptr < lptr || *rptr != '"')
      FATAL("Malformed name=\"value\" pair in line %u.", cur_line);

    *rptr = 0;

    /* Skip alphanumerics and dashes (label). */

    while (isalnum(*lptr) || *lptr == '_') lptr++;

    /* If @number follows, parse that. */

    if (*lptr == '@') {

      lptr++;
      if (atoi(lptr) > dict_level) continue;
      while (isdigit(*lptr)) lptr++;

    }

    /* Skip whitespace and = signs. */

    while (isspace(*lptr) || *lptr == '=') lptr++;

    /* Consume opening '"'. */

    if (*lptr != '"')
      FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);

    lptr++;

    if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);

    /* Okay, let's allocate memory and copy data between "...", handling
       \xNN escaping, \\, and \". The decoded keyword can only shrink, so
       rptr - lptr (the raw span length) is a safe upper bound. */

    extras = ck_realloc_block(extras, (extras_cnt + 1) *
               sizeof(struct extra_data));

    wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);

    while (*lptr) {

      char* hexdigits = "0123456789abcdef";

      switch (*lptr) {

        case 1 ... 31:
        case 128 ... 255:
          FATAL("Non-printable characters in line %u.", cur_line);

        case '\\':

          lptr++;

          if (*lptr == '\\' || *lptr == '"') {
            *(wptr++) = *(lptr++);
            klen++;
            break;
          }

          if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
            FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);

          /* Decode \xNN by locating each nibble in the hexdigits table. */

          *(wptr++) =
            ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
            (strchr(hexdigits, tolower(lptr[2])) - hexdigits);

          lptr += 3;
          klen++;

          break;

        default:

          *(wptr++) = *(lptr++);
          klen++;

      }

    }

    extras[extras_cnt].len = klen;

    if (extras[extras_cnt].len > MAX_DICT_FILE)
      FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
            DMS(klen), DMS(MAX_DICT_FILE));

    if (*min_len > klen) *min_len = klen;
    if (*max_len < klen) *max_len = klen;

    extras_cnt++;

  }

  fclose(f);

}
/* Read extras from the extras directory (one keyword per file) or, if the
   path is a plain file, delegate to load_extras_file(). Sorts the result
   by size. A "path@N" suffix selects dictionary level N (files only). */

static void load_extras(u8* dir) {

  DIR* d;
  struct dirent* de;
  u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
  u8* x;

  /* If the name ends with @, extract level and continue. */

  if ((x = strchr(dir, '@'))) {

    *x = 0;
    dict_level = atoi(x + 1);

  }

  ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level);

  d = opendir(dir);

  if (!d) {

    if (errno == ENOTDIR) {
      load_extras_file(dir, &min_len, &max_len, dict_level);
      goto check_and_sort;
    }

    PFATAL("Unable to open '%s'", dir);

  }

  /* Bug fix: the message used to read "Dictinary". */

  if (x) FATAL("Dictionary levels not supported for directories.");

  while ((de = readdir(d))) {

    struct stat st;
    u8* fn = alloc_printf("%s/%s", dir, de->d_name);
    s32 fd;

    if (lstat(fn, &st) || access(fn, R_OK))
      PFATAL("Unable to access '%s'", fn);

    /* This also takes care of . and .. */

    if (!S_ISREG(st.st_mode) || !st.st_size) {

      ck_free(fn);
      continue;

    }

    if (st.st_size > MAX_DICT_FILE)
      FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
            DMS(st.st_size), DMS(MAX_DICT_FILE));

    if (min_len > st.st_size) min_len = st.st_size;
    if (max_len < st.st_size) max_len = st.st_size;

    extras = ck_realloc_block(extras, (extras_cnt + 1) *
               sizeof(struct extra_data));

    extras[extras_cnt].data = ck_alloc(st.st_size);
    extras[extras_cnt].len  = st.st_size;

    fd = open(fn, O_RDONLY);

    if (fd < 0) PFATAL("Unable to open '%s'", fn);

    ck_read(fd, extras[extras_cnt].data, st.st_size, fn);

    close(fd);
    ck_free(fn);

    extras_cnt++;

  }

  closedir(d);

check_and_sort:

  if (!extras_cnt) FATAL("No usable files in '%s'", dir);

  qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);

  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
      DMS(min_len), DMS(max_len));

  if (max_len > 32)
    WARNF("Some tokens are relatively large (%s) - consider trimming.",
          DMS(max_len));

  if (extras_cnt > MAX_DET_EXTRAS)
    WARNF("More than %u tokens - will use them probabilistically.",
          MAX_DET_EXTRAS);

}
/* Helper for maybe_add_auto(): case-insensitive memory compare.
   Returns 0 on match, 1 on any difference. */

static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {

  u32 i;

  for (i = 0; i < len; i++)
    if (tolower(m1[i]) != tolower(m2[i])) return 1;

  return 0;

}
/* Maybe add an automatically discovered token to a_extras[]. Rejects
   trivial candidates (constant runs, built-in interesting values, entries
   already present in the user dictionary), bumps the hit count on a repeat
   sighting, and otherwise inserts — evicting a random bottom-half entry
   once the table is full. */

static void maybe_add_auto(u8* mem, u32 len) {

  u32 i;

  /* Allow users to specify that they don't want auto dictionaries. */

  if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return;

  /* Skip runs of identical bytes. */

  for (i = 1; i < len; i++)
    if (mem[0] ^ mem[i]) break;

  if (i == len) return;

  /* Reject builtin interesting values (in either byte order). */

  if (len == 2) {

    i = sizeof(interesting_16) >> 1;

    while (i--)
      if (*((u16*)mem) == interesting_16[i] ||
          *((u16*)mem) == SWAP16(interesting_16[i])) return;

  }

  if (len == 4) {

    i = sizeof(interesting_32) >> 2;

    while (i--)
      if (*((u32*)mem) == interesting_32[i] ||
          *((u32*)mem) == SWAP32(interesting_32[i])) return;

  }

  /* Reject anything that matches existing extras. Do a case-insensitive
     match. We optimize by exploiting the fact that extras[] are sorted
     by size. */

  for (i = 0; i < extras_cnt; i++)
    if (extras[i].len >= len) break;

  for (; i < extras_cnt && extras[i].len == len; i++)
    if (!memcmp_nocase(extras[i].data, mem, len)) return;

  /* Last but not least, check a_extras[] for matches. There are no
     guarantees of a particular sort order. */

  auto_changed = 1;

  for (i = 0; i < a_extras_cnt; i++) {

    if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {

      a_extras[i].hit_cnt++;
      goto sort_a_extras;

    }

  }

  /* At this point, looks like we're dealing with a new entry. So, let's
     append it if we have room. Otherwise, let's randomly evict some other
     entry from the bottom half of the list. */

  if (a_extras_cnt < MAX_AUTO_EXTRAS) {

    a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
                 sizeof(struct extra_data));

    a_extras[a_extras_cnt].data = ck_memdup(mem, len);
    a_extras[a_extras_cnt].len  = len;
    a_extras_cnt++;

  } else {

    /* Pick a victim uniformly from the lower (least-used) half. */

    i = MAX_AUTO_EXTRAS / 2 +
        UR((MAX_AUTO_EXTRAS + 1) / 2);

    ck_free(a_extras[i].data);

    a_extras[i].data    = ck_memdup(mem, len);
    a_extras[i].len     = len;
    a_extras[i].hit_cnt = 0;

  }

sort_a_extras:

  /* First, sort all auto extras by use count, descending order. */

  qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
        compare_extras_use_d);

  /* Then, sort the top USE_AUTO_EXTRAS entries by size. */

  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
        sizeof(struct extra_data), compare_extras_len);

}
/* Save automatically generated extras to out_dir so they survive a
   resume. Only the top USE_AUTO_EXTRAS entries are persisted. */

static void save_auto(void) {

  u32 idx;

  if (!auto_changed) return;
  auto_changed = 0;

  for (idx = 0; idx < MIN(USE_AUTO_EXTRAS, a_extras_cnt); idx++) {

    s32 fd;
    u8* path = alloc_printf("%s/queue/.state/auto_extras/auto_%06u",
                            out_dir, idx);

    fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", path);

    ck_write(fd, a_extras[idx].data, a_extras[idx].len, path);

    close(fd);
    ck_free(path);

  }

}
/* Load automatically generated extras saved by a previous session. Stops
   at the first missing file (they are numbered consecutively). */

static void load_auto(void) {

  u32 idx;

  for (idx = 0; idx < USE_AUTO_EXTRAS; idx++) {

    u8  tmp[MAX_AUTO_EXTRA + 1];
    u8* path = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, idx);
    s32 fd, len;

    fd = open(path, O_RDONLY, 0600);

    if (fd < 0) {

      if (errno != ENOENT) PFATAL("Unable to open '%s'", path);
      ck_free(path);
      break;

    }

    /* We read one byte more to cheaply detect tokens that are too
       long (and skip them). */

    len = read(fd, tmp, MAX_AUTO_EXTRA + 1);

    if (len < 0) PFATAL("Unable to read from '%s'", path);

    if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
      maybe_add_auto(tmp, len);

    close(fd);
    ck_free(path);

  }

  if (idx) OKF("Loaded %u auto-discovered dictionary tokens.", idx);
  else OKF("No auto-generated dictionary tokens to reuse.");

}
/* Destroy extras, freeing both the user dictionary and the auto-discovered
   tokens along with their payload buffers. */

static void destroy_extras(void) {

  u32 idx;

  for (idx = 0; idx < extras_cnt; idx++) ck_free(extras[idx].data);
  ck_free(extras);

  for (idx = 0; idx < a_extras_cnt; idx++) ck_free(a_extras[idx].data);
  ck_free(a_extras);

}
/* Code to fuzz targets across localhost/127.0.0.1/::1 network interface
*
* The network fuzzing code operates in each of two modes depending upon
* the type of target:
*
* (1) as a "listener" or "server" to fuzz targets that send a request to
* another process and expect a response. These targets are called
* "clients". The relevant functions are network_setup_listener(),
* which creates a socket and binds that socket to a (local) port
* specified on the command line, and network_listen(), which expects
* to receive a packet (UDP) or stream of data (TCP) from the target
* and sends a fuzzed response. This mode is selected using the -L
* command line option, together with the -N command line option.
*
* (2) as a "client" to fuzz targets that expect to receive a request from
* another process. These targets are called "servers" or "daemons".
* The relevant function is network_send(), which sends a fuzzed
* packet (UDP) or stream of data (TCP) to the target. This mode is
* selected using the -N command line option without the -L command
* line option.
*
* */
/* Set up the listening ("server") side of network fuzzing mode (-L with -N).
   Creates a socket from the getaddrinfo() results in N_results, binds it to
   the first usable local address, and — for TCP — starts listening for
   connection attempts. Idempotent: N_myaddr_valid guards against running
   the setup twice. On success, N_rp points at the addrinfo entry in use and
   N_fd holds the bound descriptor (kept open for the whole run).

   Bug fixes vs. the previous revision:
     - the UDP branch reported its setsockopt() failure as "(TCP case)";
     - both branches called close() on an invalid (-1) descriptor after a
       failed socket() call. */

void network_setup_listener(void) {

  /* Exit if getaddrinfo() did not return address information structures
     that match the specification on the command line. FATAL() does not
     return, so the rest of the function can assume N_results != NULL. */

  if (N_results == NULL)
    FATAL("no matching results from getaddrinfo()");

  if (N_results->ai_socktype == SOCK_STREAM) {

    /* TCP (stream): a connection must be established from the target each
       time network_listen() is called, and closed after the data are
       transferred. Create a stream socket (N_fd) and listen for connection
       requests; this must happen before a target that expects to connect
       is executed. */

    int optval = 1;

    if (N_myaddr_valid == 0) { /* don't do this twice! */

      /* Find the first address that works and use it. */

      for (N_rp = N_results; N_rp != NULL; N_rp = N_rp->ai_next) {

        N_fd = socket(N_rp->ai_family, N_rp->ai_socktype, N_rp->ai_protocol);

        /* No valid descriptor to close here; just try the next address. */
        if (N_fd == -1) continue;

        /* Set the socket option to reuse both the address and port. */

        if (setsockopt(N_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &optval,
                       sizeof (optval)) == -1) {
          close(N_fd);
          PFATAL("failed to set socket option (TCP case)");
        }

        /* If bind() succeeds, we have found an address that works. */

        if (bind(N_fd, N_rp->ai_addr, N_rp->ai_addrlen) != -1) break;

        close(N_fd);

      }

      /* If none is found, the user needs to examine the argument list. */

      if (N_rp == NULL) FATAL("failed to bind socket");

      /* Listen for connection attempts. This can fail if another process
         is listening on the same port and address. */

      if (listen(N_fd, 8) == -1) PFATAL("listen() failed");

      /* Socket created, bound, and listening. */

      N_myaddr_valid = 1;

    }

  } else if (N_results->ai_socktype == SOCK_DGRAM) {

    /* UDP (datagrams): create a single socket (N_fd) used both to receive
       and send packets; it stays open for the duration of the run. On the
       first successful pass, N_rp is left pointing at the addrinfo entry
       for the bound socket. */

    int optval = 1;

    if (N_myaddr_valid == 0) {

      for (N_rp = N_results; N_rp != NULL; N_rp = N_rp->ai_next) {

        N_fd = socket(N_rp->ai_family, N_rp->ai_socktype, N_rp->ai_protocol);

        /* No valid descriptor to close here; report and try the next one. */

        if (N_fd == -1) {
          fprintf(stderr, "socket() call failed\n");
          continue;
        }

        /* Set the socket option to reuse both the address and port. */

        if (setsockopt(N_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &optval,
                       sizeof (optval)) == -1) {
          close(N_fd);
          PFATAL("failed to set socket option (UDP case)");
        }

        /* If bind() succeeds, we have found an address that works. */

        if (bind(N_fd, N_rp->ai_addr, N_rp->ai_addrlen) != -1) break;

        close(N_fd);

      }

      /* If none is found, the user needs to examine the argument list. */

      if (N_rp == NULL) FATAL("failed to bind socket");

      /* Socket created and bound. */

      N_myaddr_valid = 1;

    }

  }

}
int network_listen(void) {
/* This function receives data from the target process, and then sends
* fuzzed data back to it. There are two cases:
*
* (1) TCP (streams): a connection attempt from the target process is
* solicited. When the connection has been established, all available
* data are read using non-blocking I/O, and then fuzzed data are
* written.
*
* (2) UDP (datagrams/packets): all available packets are read using
* non-blocking I/O, and then fuzzed data are written.
*
* In both cases, all data read are discarded. Note that for UDP reads
* any data in excess of the size of the read buffer are discarded by the
* network stack.
*
* Note that non-blocking reads are attempted, and if they fail then the
* calling process is expected to wait for a programmed interval of time
* (specified by the -D command line argument) and retry the call to
* network_listen(), for a programmed number of times (not user-selectable).
*
* Note that unlike the case where this code plays the role of a client to
* the target process (using network_send()), we typically have no control
* over the target's reuse (or not) of ephemeral port numbers. Therefore,
* we are at the mercy of the network stack's ability to scavenge available
* port numbers. A recent Linux kernel appears to do this quite well;
* other operating systems may not.
*
* Local variables:
*/
u32 MAXRECVBUFSIZE = 512;
u8 recvbuf[MAXRECVBUFSIZE];
s32 currreadlen, client_fd, fd, o;
/* network_setup_listener() must be called first, and must succeed */
if (!N_myaddr_valid)
FATAL("error: network_listen() called before network_setup_listener()");
/* Two cases: SOCK_STREAM (for TCP) and SOCK_DGRAM (for UDP) */
if (N_rp->ai_socktype == SOCK_STREAM) {
/* TCP (stream) and connections are used. */
/* accept a connection if the client is ready, but don't block */
client_fd = accept4(N_fd, (struct sockaddr *) &N_myaddr,
&N_myaddrlen, SOCK_CLOEXEC | SOCK_NONBLOCK);
if (client_fd == -1) {
if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
return -1; /* return to calling program, which will delay before retrying */
} else { /* a serous error occurred */
PFATAL("accept4() returned error other than EAGAIN or EWOULDBLOCK");
}
}
/* read whatever the client sends and throw it away, resetting
* non-blocking mode first (because some UNIXs propagate it to
* the returned client_fd) */
o = fcntl(client_fd, F_GETFL);
if (o >= 0) {
o = o & (~O_NONBLOCK);
if (fcntl(client_fd, F_SETFL, o) < 0) {
PFATAL("failed to reset non-blocking flag on client file descriptor (TCP)");
}
}
while ((currreadlen = recv(client_fd,recvbuf,MAXRECVBUFSIZE,MSG_DONTWAIT)) > 0);
if ((currreadlen <= 0) && (errno != EAGAIN) && (errno != EWOULDBLOCK)) {
PFATAL("read error");
}
/* duplicate the file descriptor used for the fuzzed data, and use the new
* file descriptor to read that data and send it to the target process */
fd = dup(out_fd);
struct stat statbuf;
/* stat the file descriptor to obtain the size of the data to be sent */
if (fstat(fd, &statbuf) == -1) {
PFATAL("failed to obtain stat for output file to target");
}
/* seek to the beginning of the file */
lseek(fd, 0, SEEK_SET);
/* use sendfile() to transfer the data if possible because it is efficient */
if (sendfile(client_fd, fd, NULL, statbuf.st_size) == -1) {
/* if sendfile() didn't work, use read() and write() via a buffer */
lseek(fd, 0, SEEK_SET); /* reset to the beginning of the file */
u8 tempbuf[512];
u32 kread;
while ((kread = read(fd, tempbuf, 512)) > 0) {
if (write(client_fd, tempbuf, kread) != kread) {
PFATAL("file copy to network socket failed (TCP)");
}
}
}
/* leave a clean campsite (as we found it) */
lseek(fd, 0, SEEK_SET);
close(fd);
/* and close the file descriptor of the socket for the target */
close(client_fd);
} else if (N_rp->ai_socktype == SOCK_DGRAM) {
/* UDP datagrams are used.
*
* N_fd is kept open for the duration of the afl run (closed on exit)
* and reused. N_myaddr_valid signals this code that the UDP socket
* has been set up and bound to the sending side of the address & port.
* N_rp points to the address information used for the socket.
*
* Local variables:
*/
struct stat statbuf;
struct sockaddr_storage clientaddr;
u32 clientaddrlen = sizeof (struct sockaddr_storage);
/* read all available packets from the socket using non-blocking I/O */
{
int received_one = 0;
while ((currreadlen = recvfrom(N_fd, recvbuf, MAXRECVBUFSIZE, MSG_DONTWAIT,
(struct sockaddr *) &clientaddr, &clientaddrlen)) > 0) {
received_one = 1;
}
/* at least one is necessary; otherwise, return & calling program may
* wait and then try again */
if (!received_one) {
if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
return -1;
} else {
/* any other error signals imply a serious problem exists */
PFATAL("read error");
}
}
}
/* duplicate the file descriptor used for the fuzzed data, and use the new
* file descriptor to read that data and send it to the target process */
fd = dup(out_fd);
/* stat the file descriptor to obtain the size of the data to be sent */
if (fstat(fd, &statbuf) == -1) PFATAL("fstat()failed");
/* seek to the beginning of the file and create a temporary buffer to
* hold all of the data in the file */
lseek(fd, 0, SEEK_SET);
u8 tempbuf[statbuf.st_size];
/* read the entire file into the buffer */
if (read(fd, tempbuf, statbuf.st_size) != statbuf.st_size)
PFATAL("read of outfile's content failed to return expected # of bytes");
/* and send the buffer's content to the target process. Note that this
* code assumes that the entire buffer can be sent in a single packet. If
* it can not (giant packet), the user may be doing something wrong. */
if (sendto(N_fd, tempbuf, statbuf.st_size, 0,
(struct sockaddr *)&clientaddr,
clientaddrlen) < 0) {
PFATAL("partial or failed UDP write");
}
/* leave a clean campsite (as we found it) */
lseek(fd, 0, SEEK_SET);
close(fd);
}
return 0;
}
int network_send(void) {
/* This function sends fuzzed data to a target process. There are two cases:
*
* (1) TCP (streams): a connection to the target process is attempted.
* When the connection has been established, the fuzzed data are
* written.
*
* (2) UDP (datagrams/packets): The fuzzed data are written.
*
* N_results should never be a NULL pointer because the return code
* from getaddrinfo() is checked. */
if (N_results != NULL) {
/* Two cases: SOCK_STREAM (for TCP) and SOCK_DGRAM (for UDP) */
if (N_results->ai_socktype == SOCK_STREAM) {
/* TCP (stream) and connections are used.
*
* NOTE: A TCP connection must be established each time this code
* is called, and closed after the data are transfered. However, the
* same port number should be used for the sending (this) side of the
* TCP transaction every time. Otherwise, ephemeral port
* numbers might be exhausted because of TCP's TIME_WAIT timeout
* interval. N_myaddr_valid tells this code that the sending side's
* address information has been stored in N_myaddr and is to be reused.
* UDP is connectionless and is therefore different. See below.
*
* Note that the other mode of operation, where this code acts as a
* server to a target, does not have control over the target's reuse
* of ephemeral port numbers. See the comments in network_listen()
* for a discussion.
*
* Note that "soft" failures cause a return with an error code of -1. The
* calling process is expected to wait for a programmed interval of time
* (specified by the -D command line argument) and retry the call to
* network_send(), for a programmed number of times (not user-selectable)
* when this occurs.
*
* Local variables: */
int optval = 1;
if (N_myaddr_valid == 0) {
/* First time: Find the correct address and use it, saving the info
* in M_myaddr for subsequent calls. */
for (N_rp = N_results; N_rp != NULL; N_rp = N_rp->ai_next) {
/* create a socket to connect to the target process */
N_fd = socket(N_rp->ai_family, N_rp->ai_socktype, N_rp->ai_protocol);
if (N_fd == -1) {
continue;
}
/* set the socket options to reuse both the address and port */
if (setsockopt(N_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &optval,
sizeof (optval)) == -1) {
PFATAL("failed to set socket option (TCP case)");
}
/* attempt to connect to the target process, breaking out of the
* loop upon success */
if (connect(N_fd, N_rp->ai_addr, N_rp->ai_addrlen) != -1) {
break;
}
/* connect() failed, so close the file descriptor and try the
* next address information data structure */
close(N_fd);
}
if (N_rp == NULL) {
return -1; /* failed to connect; target process probably not ready */
}
/* obtain the send side socket information for re-use */
if (getsockname(N_fd, (struct sockaddr *) (&N_myaddr), &N_myaddrlen) == -1) {
PFATAL("unable to obtain local socket address information (TCP case)");
}
N_myaddr_valid = 1;
} else {
/* This is not the first time; reuse send side info in N_myaddr. */
N_fd = socket(N_rp->ai_family, N_rp->ai_socktype, N_rp->ai_protocol);
if (N_fd == -1) {
PFATAL("Subsequent attempt to create socket failed (TCP case)");
}
if (setsockopt(N_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &optval,
sizeof (optval)) == -1) {
PFATAL("Subsequent attempt to set socket option failed (TCP case)");
}
if (bind(N_fd, (struct sockaddr *) (&N_myaddr), N_myaddrlen) == -1) {
PFATAL("Attempt to bind socket to source address & port failed (TCP case)");
}
if (connect(N_fd, N_rp->ai_addr, N_rp->ai_addrlen) != -1) {
} else {
close(N_fd);
return -1; /* error returned from connect; target process not ready */
}
}
{
/* duplicate the file descriptor used for the fuzzed data, and use
* the new file descriptor to read that data and send it to the
* target process */
s32 fd = dup(out_fd);
/* stat the file descriptor to obtain the size of the data to be sent */
struct stat statbuf;
if (fstat(fd, &statbuf) == -1) PFATAL("fstat()failed");
/* seek to the beginning of the file */
lseek(fd, 0, SEEK_SET);
/* use sendfile() to transfer the data if possible because it is efficient */
if (sendfile(N_fd, fd, NULL, statbuf.st_size) == -1) {
/* if sendfile() didn't work, use read() and write() via a buffer */
lseek(fd, 0, SEEK_SET); /* reset to the beginning of the file */
/* create a temporary buffer to hold all of the data in the file */
u8 tempbuf[512];
u32 kread;
while ((kread = read(fd, tempbuf, 512)) > 0) {
if (write(N_fd, tempbuf, kread) != kread) {
PFATAL("file copy to network socket failed (TCP)");
}
}
}
/* leave a clean campsite (as we found it) */
lseek(fd, 0, SEEK_SET);
close(fd);
}
/* and close the connection to the target process, signaling EOF */
close(N_fd);
} else if (N_results->ai_socktype == SOCK_DGRAM) {
/* UDP datagrams are used.
*
* N_fd is kept open for the duration of the afl run (closed on exit)
* and reused. N_myaddr_valid signals this code that the UDP socket
* has been set up and bound to the sending side of the address & port.
* N_rp points to the recipient side's address information after the
* first call. */
if (N_myaddr_valid == 0) {
/* First time: find the appropriate sockaddr structure to be used and
* set up the sending side's socket. After the first time's successful
* execution, N_myaddr holds the sending side's socket information,
* N_rp points to the socket address structure that was used to
* create the socket, and N_fd is a valid file descriptor for the
* socket. */
for (N_rp = N_results; N_rp != NULL; N_rp = N_rp->ai_next) {
if (!((N_rp->ai_family == AF_INET) || (N_rp->ai_family == AF_INET6))) {
continue;
}
/* create appropriate struct sockaddr according to ai_family */
if (N_rp->ai_family == AF_INET6) {
memset(&N_server_addr, 0, sizeof (struct sockaddr_in6));
N_server_addr.ss_family = AF_INET6;
((struct sockaddr_in6 *) &N_server_addr)->sin6_family = AF_INET6;
((struct sockaddr_in6 *) &N_server_addr)->sin6_addr = in6addr_any;
((struct sockaddr_in6 *) &N_server_addr)->sin6_port = 0;
} else if (N_rp->ai_family == AF_INET) {
memset(&N_server_addr, 0, sizeof (struct sockaddr_in));
N_server_addr.ss_family = AF_INET;
((struct sockaddr_in *) &N_server_addr)->sin_family = AF_INET;
((struct sockaddr_in *) &N_server_addr)->sin_addr.s_addr = INADDR_ANY;
((struct sockaddr_in *) &N_server_addr)->sin_port = 0;
} else {
FATAL("invalid ai_family (UDP case)");
}
/* create socket */
N_fd = socket(N_rp->ai_family, N_rp->ai_socktype, N_rp->ai_protocol);
if (N_fd == -1) {
continue;
}
/* bind to the address using an ephemeral port number */
if (bind(N_fd, (struct sockaddr *) &N_server_addr, sizeof (struct sockaddr_storage)) < 0) {
PFATAL("bind failed (UDP case)");
} else {
/* obtain the local port number that was assigned (for debugging) */
N_myaddrlen = sizeof (struct sockaddr_storage);
if (getsockname(N_fd, (struct sockaddr *) &N_myaddr, &N_myaddrlen) < 0) {
PFATAL("get socket name failed (UDP case)");
} else {
break;
}
}
close(N_fd);
}
N_myaddr_valid = 1;
}
if (N_rp == NULL) {
return -1; /* failed to connect on any address (UDP case) */
}
{
/* duplicate the file descriptor used for the fuzzed data, and use
* the new file descriptor to read that data and send it to the
* target process */
s32 fd = dup(out_fd);
/* stat the file descriptor to obtain the size of the data to be sent */
struct stat statbuf;
if (fstat(fd, &statbuf) == -1) PFATAL("fstat()failed");
/* seek to the beginning of the file */
lseek(fd, 0, SEEK_SET);
/* create a temporary buffer to hold all of the data in the file */
u8 tempbuf[statbuf.st_size];
/* read the entire file into the buffer */
if (read(fd, tempbuf, statbuf.st_size) != statbuf.st_size) {
PFATAL("read of outfile's content failed to return expected # of bytes");
}
if (N_rp->ai_family == AF_INET) {
/* and send the buffer's content to the target process. Note that
* this code assumes that the entire buffer can be sent in a single
* packet. If it can not (giant packet), the user may be doing
* something wrong. */
if (sendto(N_fd, tempbuf, statbuf.st_size, 0,
(struct sockaddr *) ((N_rp)->ai_addr),
sizeof (struct sockaddr_in)) < 0) {
PFATAL("partial or failed UDP write (IPv4)");
}
} else if (N_rp->ai_family == AF_INET6) {
if (sendto(N_fd, tempbuf, statbuf.st_size, 0,
(struct sockaddr *) ((N_rp)->ai_addr),
sizeof (struct sockaddr_in6)) < 0) {
PFATAL("partial or failed UDP write (IPv6)");
}
}
/* leave a clean campsite (as we found it) */
lseek(fd, 0, SEEK_SET);
close(fd);
}
}
} else {
/* this should never be executed */
FATAL("no address information structures match command line network spec");
}
return 0;
}
/* Spin up fork server (instrumented mode only). The idea is explained here:
http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
In essence, the instrumentation allows us to skip execve(), and just keep
cloning a stopped child. So, we just execute once, and then send commands
through a pipe. The other part of this logic is in afl-as.h. */
static void init_forkserver(char** argv) {

  static struct itimerval it;
  int st_pipe[2], ctl_pipe[2];   /* status pipe (child->us), control pipe (us->child) */
  int status;
  s32 rlen;

  ACTF("Spinning up the fork server...");

  if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");

  forksrv_pid = fork();

  if (forksrv_pid < 0) PFATAL("fork() failed");

  if (!forksrv_pid) {

    /* Child process: apply resource limits, detach from the terminal,
       wire up the fixed forkserver fds, and exec the target. */

    struct rlimit r;

    /* Umpf. On OpenBSD, the default fd limit for root users is set to
       soft 128. Let's try to fix that... */

    if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {

      r.rlim_cur = FORKSRV_FD + 2;
      setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */

    }

    if (mem_limit) {

      /* mem_limit is in MB; shift to bytes. */

      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;

#ifdef RLIMIT_AS

      setrlimit(RLIMIT_AS, &r); /* Ignore errors */

#else

      /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
         according to reliable sources, RLIMIT_DATA covers anonymous
         maps - so we should be getting good protection against OOM bugs. */

      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */

#endif /* ^RLIMIT_AS */

    }

    /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
       before the dump is complete. */

    r.rlim_max = r.rlim_cur = 0;

    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */

    /* Isolate the process and configure standard descriptors. If out_file is
       specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */

    setsid();

    dup2(dev_null_fd, 1);
    dup2(dev_null_fd, 2);

    if (out_file || N_valid == 1) { /* no stdin for file or network input */

      dup2(dev_null_fd, 0);

    } else {

      dup2(out_fd, 0);
      close(out_fd);

    }

    /* Set up control and status pipes, close the unneeded original fds.
       FORKSRV_FD / FORKSRV_FD+1 are the well-known descriptors the
       instrumentation injected by afl-as.h expects. */

    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");

    close(ctl_pipe[0]);
    close(ctl_pipe[1]);
    close(st_pipe[0]);
    close(st_pipe[1]);

    close(out_dir_fd);
    close(dev_null_fd);
    close(dev_urandom_fd);
    close(fileno(plot_file));

    /* This should improve performance a bit, since it stops the linker from
       doing extra work post-fork(). */

    if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);

    /* Set sane defaults for ASAN if nothing else specified. */

    setenv("ASAN_OPTIONS", "abort_on_error=1:"
                           "detect_leaks=0:"
                           "allocator_may_return_null=1", 0);

    /* MSAN is tricky, because it doesn't support abort_on_error=1 at this
       point. So, we do this in a very hacky way. */

    setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                           "msan_track_origins=0", 0);

    execv(target_path, argv);

    /* Use a distinctive bitmap signature to tell the parent about execv()
       falling through. */

    *(u32*)trace_bits = EXEC_FAIL_SIG;
    exit(0);

  }

  /* Parent process: close the unneeded endpoints. */

  close(ctl_pipe[0]);
  close(st_pipe[1]);

  fsrv_ctl_fd = ctl_pipe[1];
  fsrv_st_fd = st_pipe[0];

  /* Wait for the fork server to come up, but don't wait too long. */

  it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
  it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;

  setitimer(ITIMER_REAL, &it, NULL);

  /* The forkserver announces readiness with a 4-byte "hello" message. */

  rlen = read(fsrv_st_fd, &status, 4);

  /* Disarm the timer again. */

  it.it_value.tv_sec = 0;
  it.it_value.tv_usec = 0;

  setitimer(ITIMER_REAL, &it, NULL);

  /* If we have a four-byte "hello" message from the server, we're all set.
     Otherwise, try to figure out what went wrong. */

  if (rlen == 4) {
    OKF("All right - fork server is up.");
    return;
  }

  if (child_timed_out)
    FATAL("Timeout while initializing fork server (adjusting -t may help)");

  if (waitpid(forksrv_pid, &status, 0) <= 0)
    PFATAL("waitpid() failed");

  if (WIFSIGNALED(status)) {

    /* The forkserver died on a signal before handshaking; print the most
       plausible diagnosis based on memory-limit / ASAN configuration. */

    if (mem_limit && mem_limit < 500 && uses_asan) {

      SAYF("\n" cLRD "[-] " cRST
           "Whoops, the target binary crashed suddenly, before receiving any input\n"
           "    from the fuzzer! Since it seems to be built with ASAN and you have a\n"
           "    restrictive memory limit configured, this is expected; please read\n"
           "    %s/notes_for_asan.txt for help.\n", doc_path);

    } else if (!mem_limit) {

      SAYF("\n" cLRD "[-] " cRST
           "Whoops, the target binary crashed suddenly, before receiving any input\n"
           "    from the fuzzer! There are several probable explanations:\n\n"

           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
           "      need to fix the underlying problem or find a better replacement.\n\n"

#ifdef __APPLE__

           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
           "      break afl-fuzz performance optimizations when running platform-specific\n"
           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

#endif /* __APPLE__ */

           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
           "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

    } else {

      SAYF("\n" cLRD "[-] " cRST
           "Whoops, the target binary crashed suddenly, before receiving any input\n"
           "    from the fuzzer! There are several probable explanations:\n\n"

           "    - The current memory limit (%s) is too restrictive, causing the\n"
           "      target to hit an OOM condition in the dynamic linker. Try bumping up\n"
           "      the limit with the -m setting in the command line. A simple way confirm\n"
           "      this diagnosis would be:\n\n"

#ifdef RLIMIT_AS
           "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#else
           "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#endif /* ^RLIMIT_AS */

           "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
           "      estimate the required amount of virtual memory for the binary.\n\n"

           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
           "      need to fix the underlying problem or find a better replacement.\n\n"

#ifdef __APPLE__

           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
           "      break afl-fuzz performance optimizations when running platform-specific\n"
           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

#endif /* __APPLE__ */

           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
           "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
           DMS(mem_limit << 20), mem_limit - 1);

    }

    FATAL("Fork server crashed with signal %d", WTERMSIG(status));

  }

  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
    FATAL("Unable to execute target application ('%s')", argv[0]);

  /* Exited cleanly, but never sent the hello message. */

  if (mem_limit && mem_limit < 500 && uses_asan) {

    SAYF("\n" cLRD "[-] " cRST
         "Hmm, looks like the target binary terminated before we could complete a\n"
         "    handshake with the injected code. Since it seems to be built with ASAN and\n"
         "    you have a restrictive memory limit configured, this is expected; please\n"
         "    read %s/notes_for_asan.txt for help.\n", doc_path);

  } else if (!mem_limit) {

    SAYF("\n" cLRD "[-] " cRST
         "Hmm, looks like the target binary terminated before we could complete a\n"
         "    handshake with the injected code. Perhaps there is a horrible bug in the\n"
         "    fuzzer. Poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

  } else {

    SAYF("\n" cLRD "[-] " cRST
         "Hmm, looks like the target binary terminated before we could complete a\n"
         "    handshake with the injected code. There are %s probable explanations:\n\n"

         "%s"
         "    - The current memory limit (%s) is too restrictive, causing an OOM\n"
         "      fault in the dynamic linker. This can be fixed with the -m option. A\n"
         "      simple way to confirm the diagnosis may be:\n\n"

#ifdef RLIMIT_AS
         "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#else
         "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
#endif /* ^RLIMIT_AS */

         "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
         "      estimate the required amount of virtual memory for the binary.\n\n"

         "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
         "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
         getenv(DEFER_ENV_VAR) ? "three" : "two",
         getenv(DEFER_ENV_VAR) ?
         "    - You are using deferred forkserver, but __AFL_INIT() is never\n"
         "      reached before the program terminates.\n\n" : "",
         DMS(mem_limit << 20), mem_limit - 1);

  }

  FATAL("Fork server handshake failed");

}
/* Execute target application, monitoring for timeouts. Return status
information. The called program will update trace_bits[]. */
static u8 run_target(char** argv) {

  static struct itimerval it;
  static u32 prev_timed_out = 0;   /* forwarded to the fork server on the next
                                      request so it can react to a prior hang */
  int status = 0;
  u32 tb4;

  child_timed_out = 0;

  /* check to ensure that network listener has executed if doing network
   * fuzzing of a client target (where the target writes to a socket first) */

  if (N_fuzz_client && !N_myaddr_valid) {
    network_setup_listener();
  }

  /* After this memset, trace_bits[] are effectively volatile, so we
     must prevent any earlier operations from venturing into that
     territory. */

  memset(trace_bits, 0, MAP_SIZE);
  MEM_BARRIER();

  /* If we're running in "dumb" mode, we can't rely on the fork server
     logic compiled into the target program, so we will just keep calling
     execve(). There is a bit of code duplication between here and
     init_forkserver(), but c'est la vie. */

  if (dumb_mode == 1 || no_forkserver) {

    child_pid = fork();

    if (child_pid < 0) PFATAL("fork() failed");

    if (!child_pid) {

      /* Child: apply limits, isolate, wire descriptors, exec the target. */

      struct rlimit r;

      if (mem_limit) {

        r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;

#ifdef RLIMIT_AS

        setrlimit(RLIMIT_AS, &r); /* Ignore errors */

#else

        setrlimit(RLIMIT_DATA, &r); /* Ignore errors */

#endif /* ^RLIMIT_AS */

      }

      r.rlim_max = r.rlim_cur = 0;

      setrlimit(RLIMIT_CORE, &r); /* Ignore errors */

      /* Isolate the process and configure standard descriptors. If out_file is
         specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */

      setsid();

      dup2(dev_null_fd, 1);
      dup2(dev_null_fd, 2);

      if (out_file || N_valid == 1) { /* no stdin for file or network input */

        dup2(dev_null_fd, 0);

      } else {

        dup2(out_fd, 0);
        close(out_fd);

      }

      /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */

      close(dev_null_fd);
      close(out_dir_fd);
      close(dev_urandom_fd);
      close(fileno(plot_file));

      /* Set sane defaults for ASAN if nothing else specified. */

      setenv("ASAN_OPTIONS", "abort_on_error=1:"
                             "detect_leaks=0:"
                             "allocator_may_return_null=1", 0);

      setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                             "msan_track_origins=0", 0);

      execv(target_path, argv);

      /* Use a distinctive bitmap value to tell the parent about execv()
         falling through. */

      *(u32*)trace_bits = EXEC_FAIL_SIG;
      exit(0);

    }

  } else {

    s32 res;

    /* In non-dumb mode, we have the fork server up and running, so simply
       tell it to have at it, and then read back PID. */

    if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {

      if (stop_soon) return 0;
      RPFATAL(res, "Unable to request new process from fork server (OOM?)");

    }

    if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {

      if (stop_soon) return 0;
      RPFATAL(res, "Unable to request new process from fork server (OOM?)");

    }

    if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");

  }

  /* Write fuzzed data set to target using network if -N option is specified */

  if (N_valid) {

    if (N_timeout_given) {

      /* Network output to target process after specified delay, and try
       * up to three times (hard-coded) */

      N_it.tv_sec = (N_exec_tmout / 1000);
      N_it.tv_nsec = (N_exec_tmout % 1000) * 1000000;

      /* ignore errors & accept possibility that delay can be shorter */

      {
        u32 N_tries = 3;
        nanosleep(&N_it, NULL);
        /* attempt to send up to 3 times (because of target process startup time) */
        while (N_tries-- &&
               ((N_fuzz_client?network_listen():network_send()) == -1));
      }

    } else {

      /* Network output to target process - no delay. This usually won't work,
         because the freshly spawned target has not opened its socket yet. */

      if ((N_fuzz_client?network_listen():network_send()) == -1) {
        FATAL("Network: failed to connect or send; specify a network delay time");
      }

    }

  }

  /* Configure timeout, as requested by user, then wait for child to terminate. */

  it.it_value.tv_sec = (exec_tmout / 1000);
  it.it_value.tv_usec = (exec_tmout % 1000) * 1000;

  setitimer(ITIMER_REAL, &it, NULL);

  /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */

  if (dumb_mode == 1 || no_forkserver) {

    if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");

  } else {

    s32 res;

    /* The fork server reports the child's exit status over the status pipe. */

    if ((res = read(fsrv_st_fd, &status, 4)) != 4) {

      if (stop_soon) return 0;
      RPFATAL(res, "Unable to communicate with fork server");

    }

  }

  child_pid = 0;
  it.it_value.tv_sec = 0;
  it.it_value.tv_usec = 0;

  setitimer(ITIMER_REAL, &it, NULL);

  total_execs++;

  /* Any subsequent operations on trace_bits must not be moved by the
     compiler below this point. Past this location, trace_bits[] behave
     very normally and do not have to be treated as volatile. */

  MEM_BARRIER();

  /* Snapshot the first map word before classify_counts() rewrites the map;
     the raw value is compared against EXEC_FAIL_SIG below. */

  tb4 = *(u32*)trace_bits;

#ifdef __x86_64__
  classify_counts((u64*)trace_bits);
#else
  classify_counts((u32*)trace_bits);
#endif /* ^__x86_64__ */

  prev_timed_out = child_timed_out;

  /* Report outcome to caller. */

  if (child_timed_out) return FAULT_HANG;

  if (WIFSIGNALED(status) && !stop_soon) {
    kill_signal = WTERMSIG(status);
    return FAULT_CRASH;
  }

  /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
     must use a special exit code. */

  if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
    kill_signal = 0;
    return FAULT_CRASH;
  }

  if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
    return FAULT_ERROR;

  return FAULT_NONE;

}
/* Write modified data to file for testing. If out_file is set, the old file
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
/* Write modified data to the location the target reads from. With out_file
   set, the old file is replaced by a freshly created one; otherwise the
   shared out_fd is rewound, overwritten, and truncated to the new length. */

static void write_to_testcase(void* mem, u32 len) {

  s32 fd;

  if (out_file) {

    unlink(out_file); /* Ignore errors. */

    /* O_EXCL: the file must not exist after the unlink above. */

    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0) PFATAL("Unable to create '%s'", out_file);

    ck_write(fd, mem, len, out_file);

    close(fd);

  } else {

    fd = out_fd;

    lseek(fd, 0, SEEK_SET);
    ck_write(fd, mem, len, out_file);

    /* Drop any leftover bytes from a previous, longer test case, then
       rewind so the target reads from the start. */

    if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
    lseek(fd, 0, SEEK_SET);

  }

}
/* The same, but with an adjustable gap. Used for trimming. */
/* Like write_to_testcase(), but omits the skip_len bytes starting at
   skip_at. Used by the trimming logic. */

static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {

  s32 fd;
  u32 tail_len = len - skip_at - skip_len;

  if (out_file) {

    unlink(out_file); /* Ignore errors. */

    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", out_file);

  } else {

    fd = out_fd;
    lseek(fd, 0, SEEK_SET);

  }

  /* Copy the head before the gap, then the tail after it. */

  if (skip_at) ck_write(fd, mem, skip_at, out_file);
  if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file);

  if (out_file) {

    close(fd);

  } else {

    /* Trim leftovers from a previous write and rewind for the target. */

    if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
    lseek(fd, 0, SEEK_SET);

  }

}
static void show_stats(void);
/* Calibrate a new test case. This is done when processing the input directory
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
                         u32 handicap, u8 from_queue) {

  /* Run the test case q (contents in use_mem) several times to measure
     execution speed, bitmap size, and variable behavior. Returns a FAULT_*
     code (0 == FAULT_NONE on success). Temporarily overrides the global
     stage_* variables and exec_tmout, restoring them before returning. */

  u8 fault = 0, new_bits = 0, var_detected = 0, first_run = (q->exec_cksum == 0);
  u64 start_us, stop_us;

  /* Save globals that this routine clobbers so they can be restored. */

  s32 old_sc = stage_cur, old_sm = stage_max, old_tmout = exec_tmout;
  u8* old_sn = stage_name;

  /* Be a bit more generous about timeouts when resuming sessions, or when
     trying to calibrate already-added finds. This helps avoid trouble due
     to intermittent latency. */

  if (!from_queue || resuming_fuzz)
    exec_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
                     exec_tmout * CAL_TMOUT_PERC / 100);

  /* Incremented up front; reset to 0 only on a fully successful run. */

  q->cal_failed++;

  stage_name = "calibration";
  stage_max = no_var_check ? CAL_CYCLES_NO_VAR : CAL_CYCLES;

  /* Make sure the forkserver is up before we do anything, and let's not
     count its spin-up time toward binary calibration. */

  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
    init_forkserver(argv);

  start_us = get_cur_time_us();

  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

    u32 cksum;

    if (!first_run && !(stage_cur % stats_update_freq)) show_stats();

    write_to_testcase(use_mem, q->len);

    fault = run_target(argv);

    /* stop_soon is set by the handler for Ctrl+C. When it's pressed,
       we want to bail out quickly. */

    if (stop_soon || fault != crash_mode) goto abort_calibration;

    /* No instrumentation output at all on the very first run means the
       binary is likely not instrumented. */

    if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
      fault = FAULT_NOINST;
      goto abort_calibration;
    }

    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

    /* A checksum change across runs of the same input means either new
       coverage (first run) or variable behavior. */

    if (q->exec_cksum != cksum) {

      u8 hnb = has_new_bits(virgin_bits);
      if (hnb > new_bits) new_bits = hnb;

      if (!no_var_check && q->exec_cksum) {

        var_detected = 1;
        stage_max = CAL_CYCLES_LONG;   /* extend calibration for variable paths */

      } else q->exec_cksum = cksum;

    }

  }

  stop_us = get_cur_time_us();

  total_cal_us += stop_us - start_us;
  total_cal_cycles += stage_max;

  /* OK, let's collect some stats about the performance of this test case.
     This is used for fuzzing air time calculations in calculate_score(). */

  q->exec_us = (stop_us - start_us) / stage_max;
  q->bitmap_size = count_bytes(trace_bits);
  q->handicap = handicap;
  q->cal_failed = 0;

  total_bitmap_size += q->bitmap_size;
  total_bitmap_entries++;

  update_bitmap_score(q);

  /* If this case didn't result in new output from the instrumentation, tell
     parent. This is a non-critical problem, but something to warn the user
     about. */

  if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;

abort_calibration:

  if (new_bits == 2 && !q->has_new_cov) {
    q->has_new_cov = 1;
    queued_with_cov++;
  }

  /* Mark variable paths. */

  if (var_detected && !q->var_behavior) {
    mark_as_variable(q);
    queued_variable++;
  }

  /* Restore the globals saved above. */

  stage_name = old_sn;
  stage_cur = old_sc;
  stage_max = old_sm;
  exec_tmout = old_tmout;

  if (!first_run) show_stats();

  return fault;

}
/* Examine map coverage. Called once, for first test case. */

static void check_map_coverage(void) {

  u32 idx;

  /* Maps with fewer than 100 non-zero bytes are too sparse to judge. */

  if (count_bytes(trace_bits) < 100) return;

  /* Any hit in the upper half of the bitmap means we're fine. */

  for (idx = (1 << (MAP_SIZE_POW2 - 1)); idx < MAP_SIZE; idx++)
    if (trace_bits[idx]) return;

  WARNF("Recompile binary with newer version of afl to improve coverage!");

}
/* Perform dry run of all test cases to confirm that the app is working as
   expected. This is done only for the initial inputs, and only once. */

static void perform_dry_run(char** argv) {

  struct queue_entry* q = queue;
  u32 cal_failures = 0;

  /* AFL_SKIP_CRASHES lets the user tolerate (skip) seeds that crash. */

  u8* skip_crashes = getenv("AFL_SKIP_CRASHES");

  /* Walk the entire input queue, calibrating each entry in turn. */

  while (q) {

    u8* use_mem;
    u8  res;
    s32 fd;

    /* Display only the basename of the test case. */

    u8* fn = strrchr(q->fname, '/') + 1;

    ACTF("Attempting dry run with '%s'...", fn);

    fd = open(q->fname, O_RDONLY);
    if (fd < 0) PFATAL("Unable to open '%s'", q->fname);

    use_mem = ck_alloc_nozero(q->len);

    if (read(fd, use_mem, q->len) != q->len)
      FATAL("Short read from '%s'", q->fname);

    close(fd);

    res = calibrate_case(argv, q, use_mem, 0, 1);
    ck_free(use_mem);

    /* calibrate_case() returns promptly on Ctrl-C; honor that here, too. */

    if (stop_soon) return;

    if (res == crash_mode || res == FAULT_NOBITS)
      SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST,
           q->len, q->bitmap_size, q->exec_us);

    switch (res) {

      case FAULT_NONE:

        /* For the very first entry, sanity-check overall map coverage. */

        if (q == queue) check_map_coverage();

        /* In crash exploration mode (-C), a non-crashing seed is fatal. */

        if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);

        break;

      case FAULT_HANG:

        if (timeout_given) {

          /* The -t nn+ syntax in the command line sets timeout_given to '2' and
             instructs afl-fuzz to tolerate but skip queue entries that time
             out. */

          if (timeout_given > 1) {
            WARNF("Test case results in a hang (skipping)");
            q->cal_failed = CAL_CHANCES;
            cal_failures++;
            break;
          }

          SAYF("\n" cLRD "[-] " cRST
               "The program took more than %u ms to process one of the initial test cases.\n"
               " Usually, the right thing to do is to relax the -t option - or to delete it\n"
               " altogether and allow the fuzzer to auto-calibrate. That said, if you know\n"
               " what you are doing and want to simply skip the unruly test cases, append\n"
               " '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout,
               exec_tmout);

          FATAL("Test case '%s' results in a hang", fn);

        } else {

          SAYF("\n" cLRD "[-] " cRST
               "The program took more than %u ms to process one of the initial test cases.\n"
               " This is bad news; raising the limit with the -t option is possible, but\n"
               " will probably make the fuzzing process extremely slow.\n\n"
               " If this test case is just a fluke, the other option is to just avoid it\n"
               " altogether, and find one that is less of a CPU hog.\n", exec_tmout);

          FATAL("Test case '%s' results in a hang", fn);

        }

        /* NOTE: no break above; unreachable at runtime because both paths
           that get here end in FATAL(), which does not return. */

      case FAULT_CRASH:

        /* In crash exploration mode, crashing seeds are exactly what we want. */

        if (crash_mode) break;

        if (skip_crashes) {
          WARNF("Test case results in a crash (skipping)");
          q->cal_failed = CAL_CHANCES;
          cal_failures++;
          break;
        }

        /* Tailor the advice to whether a memory limit is in effect, since
           OOM under the limit is a common cause of seed crashes. */

        if (mem_limit) {

          SAYF("\n" cLRD "[-] " cRST
               "Oops, the program crashed with one of the test cases provided. There are\n"
               " several possible explanations:\n\n"
               " - The test case causes known crashes under normal working conditions. If\n"
               " so, please remove it. The fuzzer should be seeded with interesting\n"
               " inputs - but not ones that cause an outright crash.\n\n"
               " - The current memory limit (%s) is too low for this program, causing\n"
               " it to die due to OOM when parsing valid files. To fix this, try\n"
               " bumping it up with the -m setting in the command line. If in doubt,\n"
               " try something along the lines of:\n\n"

#ifdef RLIMIT_AS
               " ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
#else
               " ( ulimit -Sd $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
#endif /* ^RLIMIT_AS */

               " Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
               " estimate the required amount of virtual memory for the binary. Also,\n"
               " if you are using ASAN, see %s/notes_for_asan.txt.\n\n"

#ifdef __APPLE__

               " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
               " break afl-fuzz performance optimizations when running platform-specific\n"
               " binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

#endif /* __APPLE__ */

               " - Least likely, there is a horrible bug in the fuzzer. If other options\n"
               " fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
               DMS(mem_limit << 20), mem_limit - 1, doc_path);

        } else {

          SAYF("\n" cLRD "[-] " cRST
               "Oops, the program crashed with one of the test cases provided. There are\n"
               " several possible explanations:\n\n"
               " - The test case causes known crashes under normal working conditions. If\n"
               " so, please remove it. The fuzzer should be seeded with interesting\n"
               " inputs - but not ones that cause an outright crash.\n\n"

#ifdef __APPLE__

               " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
               " break afl-fuzz performance optimizations when running platform-specific\n"
               " binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"

#endif /* __APPLE__ */

               " - Least likely, there is a horrible bug in the fuzzer. If other options\n"
               " fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");

        }

        FATAL("Test case '%s' results in a crash", fn);

      case FAULT_ERROR:

        FATAL("Unable to execute target application ('%s')", argv[0]);

      case FAULT_NOINST:

        FATAL("No instrumentation detected");

      case FAULT_NOBITS:

        /* Seed runs but adds no new coverage - tolerated, just counted. */

        useless_at_start++;

        if (!in_bitmap)
          WARNF("No new instrumentation output, test case may be useless.");

        break;

    }

    if (q->var_behavior) WARNF("Instrumentation output varies across runs.");

    q = q->next;

  }

  /* Summarize skipped (timed-out / crashing) seeds, and bail out entirely
     if every single one of them failed calibration. */

  if (cal_failures) {

    if (cal_failures == queued_paths)
      FATAL("All test cases time out%s, giving up!",
            skip_crashes ? " or crash" : "");

    WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
          ((double)cal_failures) * 100 / queued_paths,
          skip_crashes ? " or crashes" : "");

    if (cal_failures * 5 > queued_paths)
      WARNF(cLRD "High percentage of rejected test cases, check settings!");

  }

  OKF("All test cases processed.");

}
/* Helper function: link() if possible, copy otherwise. Dies on any I/O
   error; the destination must not already exist. */

static void link_or_copy(u8* old_path, u8* new_path) {

  s32 src_fd, dst_fd, nread;
  u8* buf;

  /* Fast path: a hard link is all we need. */

  if (!link(old_path, new_path)) return;

  src_fd = open(old_path, O_RDONLY);
  if (src_fd < 0) PFATAL("Unable to open '%s'", old_path);

  dst_fd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
  if (dst_fd < 0) PFATAL("Unable to create '%s'", new_path);

  /* Fall back to a manual copy in 64 kB chunks. */

  buf = ck_alloc(64 * 1024);

  for (;;) {

    nread = read(src_fd, buf, 64 * 1024);
    if (nread <= 0) break;

    ck_write(dst_fd, buf, nread, new_path);

  }

  if (nread < 0) PFATAL("read() failed");

  ck_free(buf);

  close(src_fd);
  close(dst_fd);

}
static void nuke_resume_dir(void);

/* Create hard links for input test cases in the output directory, choosing
   good names and pivoting accordingly. */

static void pivot_inputs(void) {

  struct queue_entry* q = queue;
  u32 id = 0;

  ACTF("Creating hard links for all input files...");

  while (q) {

    /* rsl will point at the basename component of the seed's path. */

    u8 *nfn, *rsl = strrchr(q->fname, '/');
    u32 orig_id;

    if (!rsl) rsl = q->fname; else rsl++;

    /* If the original file name conforms to the syntax and the recorded
       ID matches the one we'd assign, just use the original file name.
       This is valuable for resuming fuzzing runs. */

#ifndef SIMPLE_FILES
#  define CASE_PREFIX "id:"
#else
#  define CASE_PREFIX "id_"
#endif /* ^!SIMPLE_FILES */

    if (!strncmp(rsl, CASE_PREFIX, 3) &&
        sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) {

      u8* src_str;
      u32 src_id;

      resuming_fuzz = 1;
      nfn = alloc_printf("%s/queue/%s", out_dir, rsl);

      /* Since we're at it, let's also try to find parent and figure out the
         appropriate depth for this entry. */

      src_str = strchr(rsl + 3, ':');

      if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {

        /* Walk the queue src_id steps to locate the parent entry. */

        struct queue_entry* s = queue;
        while (src_id-- && s) s = s->next;
        if (s) q->depth = s->depth + 1;

        if (max_depth < q->depth) max_depth = q->depth;

      }

    } else {

      /* No dice - invent a new name, capturing the original one as a
         substring. */

#ifndef SIMPLE_FILES

      /* Avoid stacking ",orig:" markers across repeated pivots. */

      u8* use_name = strstr(rsl, ",orig:");

      if (use_name) use_name += 6; else use_name = rsl;
      nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name);

#else

      nfn = alloc_printf("%s/queue/id_%06u", out_dir, id);

#endif /* ^!SIMPLE_FILES */

    }

    /* Pivot to the new queue entry. */

    link_or_copy(q->fname, nfn);
    ck_free(q->fname);
    q->fname = nfn;

    /* Make sure that the passed_det value carries over, too. */

    if (q->passed_det) mark_as_det_done(q);

    q = q->next;
    id++;

  }

  if (in_place_resume) nuke_resume_dir();

}
#ifndef SIMPLE_FILES

/* Construct a file name for a new test case, capturing the operation
   that led to its discovery. Uses a static buffer. */

static u8* describe_op(u8 hnb) {

  static u8 ret[256];

  if (syncing_party) {

    /* The case was imported from another fuzzer instance. */

    sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);

  } else {

    /* Locally discovered: record the parent entry... */

    sprintf(ret, "src:%06u", current_entry);

    /* ...and, if spliced, the second parent. */

    if (splicing_with >= 0)
      sprintf(ret + strlen(ret), "+%06u", splicing_with);

    sprintf(ret + strlen(ret), ",op:%s", stage_short);

    if (stage_cur_byte >= 0) {

      /* Deterministic stage: note position and, where applicable, value. */

      sprintf(ret + strlen(ret), ",pos:%u", stage_cur_byte);

      if (stage_val_type != STAGE_VAL_NONE)
        sprintf(ret + strlen(ret), ",val:%s%+d",
                (stage_val_type == STAGE_VAL_BE) ? "be:" : "",
                stage_cur_val);

    } else sprintf(ret + strlen(ret), ",rep:%u", stage_cur_val);

  }

  /* hnb == 2 means the case triggered brand-new tuple coverage. */

  if (hnb == 2) strcat(ret, ",+cov");

  return ret;

}

#endif /* !SIMPLE_FILES */
/* Write a message accompanying the crash directory :-) */

static void write_crash_readme(void) {

  u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
  s32 fd;
  FILE* f;

  /* O_EXCL: only the first caller actually creates the file. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
  ck_free(fn);

  /* Do not die on errors here - that would be impolite. */

  if (fd < 0) return;

  f = fdopen(fd, "w");

  if (!f) {
    close(fd);
    return;
  }

  fprintf(f, "Command line used to find this crash:\n\n"

             "%s\n\n"

             "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
             "memory limit. The limit used for this fuzzing session was %s.\n\n"

             "Need a tool to minimize test cases before investigating the crashes or sending\n"
             "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"

             "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
             "me a mail at <lcamtuf@coredump.cx> once the issues are fixed - I'd love to\n"
             "add your finds to the gallery at:\n\n"

             " http://lcamtuf.coredump.cx/afl/\n\n"

             "Thanks :-)\n",

             orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */

  fclose(f);

}
/* Check if the result of an execve() during routine fuzzing is interesting,
   save or queue the input test case for further analysis if so. Returns 1 if
   entry is saved, 0 otherwise. */

static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {

  u8  *fn = "";
  u8  hnb;
  s32 fd;
  u8  keeping = 0, res;

  /* In normal mode crash_mode is FAULT_NONE, so this branch handles clean
     runs; in crash exploration mode (-C) it handles crashing runs instead. */

  if (fault == crash_mode) {

    /* Keep only if there are new bits in the map, add to queue for
       future fuzzing, etc. */

    if (!(hnb = has_new_bits(virgin_bits))) {
      if (crash_mode) total_crashes++;
      return 0;
    }

#ifndef SIMPLE_FILES

    fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
                      describe_op(hnb));

#else

    fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);

#endif /* ^!SIMPLE_FILES */

    add_to_queue(fn, len, 0);

    /* hnb == 2 indicates entirely new tuples, not just new hit counts. */

    if (hnb == 2) {
      queue_top->has_new_cov = 1;
      queued_with_cov++;
    }

    queue_top->exec_cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

    /* Try to calibrate inline; this also calls update_bitmap_score() when
       successful. */

    res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);

    if (res == FAULT_ERROR)
      FATAL("Unable to execute target application");

    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", fn);
    ck_write(fd, mem, len, fn);
    close(fd);

    keeping = 1;

  }

  switch (fault) {

    case FAULT_HANG:

      /* Hangs are not very interesting, but we're still obliged to keep
         a handful of samples. We use the presence of new bits in the
         hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
         just keep everything. */

      total_hangs++;

      if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;

      if (!dumb_mode) {

        /* Collapse hit counts to a binary hit/no-hit view before the
           uniqueness check; word size picked to match the architecture. */

#ifdef __x86_64__
        simplify_trace((u64*)trace_bits);
#else
        simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */

        if (!has_new_bits(virgin_hang)) return keeping;

      }

#ifndef SIMPLE_FILES

      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
                        unique_hangs, describe_op(0));

#else

      fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
                        unique_hangs);

#endif /* ^!SIMPLE_FILES */

      unique_hangs++;

      last_hang_time = get_cur_time();

      break;

    case FAULT_CRASH:

      /* This is handled in a manner roughly similar to hangs,
         except for slightly different limits. */

      total_crashes++;

      if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;

      if (!dumb_mode) {

#ifdef __x86_64__
        simplify_trace((u64*)trace_bits);
#else
        simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */

        if (!has_new_bits(virgin_crash)) return keeping;

      }

      /* Drop a README into crashes/ the first time around. */

      if (!unique_crashes) write_crash_readme();

#ifndef SIMPLE_FILES

      fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
                        unique_crashes, kill_signal, describe_op(0));

#else

      fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
                        kill_signal);

#endif /* ^!SIMPLE_FILES */

      unique_crashes++;

      last_crash_time = get_cur_time();

      break;

    case FAULT_ERROR: FATAL("Unable to execute target application");

    default: return keeping;

  }

  /* If we're here, we apparently want to save the crash or hang
     test case, too. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
  if (fd < 0) PFATAL("Unable to create '%s'", fn);
  ck_write(fd, mem, len, fn);
  close(fd);
  ck_free(fn);

  return keeping;

}
/* When resuming, try to find the queue position to start from. This makes sense
   only when resuming, and when we can find the original fuzzer_stats. */

static u32 find_start_position(void) {

  static u8 tmp[4096]; /* Ought to be enough for anybody. */

  u8  *fn, *off;
  s32 fd, i;
  u32 ret;

  if (!resuming_fuzz) return 0;

  /* In-place resume keeps fuzzer_stats in out_dir; otherwise look next to
     the (pivoted) input directory. */

  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);

  fd = open(fn, O_RDONLY);
  ck_free(fn);

  if (fd < 0) return 0;

  /* tmp is static (zero-initialized), so the buffer stays NUL-terminated
     even if the read is short or fails. */

  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
  close(fd);

  /* NOTE(review): the +17 skip is tied to the exact width of the padded
     "cur_path" label as written by write_stats_file(); whitespace appears
     collapsed in this copy of the file - confirm the two stay in sync. */

  off = strstr(tmp, "cur_path : ");
  if (!off) return 0;

  ret = atoi(off + 17);
  if (ret >= queued_paths) ret = 0;
  return ret;

}
/* The same, but for timeouts. The idea is that when resuming sessions without
   -t given, we don't want to keep auto-scaling the timeout over and over
   again to prevent it from growing due to random flukes. */

static void find_timeout(void) {

  static u8 tmp[4096]; /* Ought to be enough for anybody. */

  u8  *fn, *off;
  s32 fd, i;
  u32 ret;

  if (!resuming_fuzz) return;

  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);

  fd = open(fn, O_RDONLY);
  ck_free(fn);

  if (fd < 0) return;

  /* Static buffer is zero-initialized, so strstr() below is safe even on a
     short or failed read. */

  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
  close(fd);

  /* NOTE(review): +17 must equal the length of the padded "exec_timeout"
     label emitted by write_stats_file(); whitespace appears collapsed in
     this copy - confirm against the actual stats output. */

  off = strstr(tmp, "exec_timeout : ");
  if (!off) return;

  ret = atoi(off + 17);

  /* Ignore implausibly small recovered values. */

  if (ret <= 4) return;

  exec_tmout = ret;

  /* timeout_given == 3 marks "recovered from stats", as opposed to 1 (-t nn)
     or 2 (-t nn+). */

  timeout_given = 3;

}
/* Update stats file for unattended monitoring. */

static void write_stats_file(double bitmap_cvg, double eps) {

  static double last_bcvg, last_eps;

  u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
  s32 fd;
  FILE* f;

  /* The file is rewritten from scratch on every update. */

  fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);

  if (fd < 0) PFATAL("Unable to create '%s'", fn);

  ck_free(fn);

  f = fdopen(fd, "w");

  if (!f) PFATAL("fdopen() failed");

  /* Keep last values in case we're called from another context
     where exec/sec stats and such are not readily available. */

  if (!bitmap_cvg && !eps) {
    bitmap_cvg = last_bcvg;
    eps = last_eps;
  } else {
    last_bcvg = bitmap_cvg;
    last_eps = eps;
  }

  fprintf(f, "start_time : %llu\n"
             "last_update : %llu\n"
             "fuzzer_pid : %u\n"
             "cycles_done : %llu\n"
             "execs_done : %llu\n"
             "execs_per_sec : %0.02f\n"
             "paths_total : %u\n"
             "paths_favored : %u\n"
             "paths_found : %u\n"
             "paths_imported : %u\n"
             "max_depth : %u\n"
             "cur_path : %u\n"
             "pending_favs : %u\n"
             "pending_total : %u\n"
             "variable_paths : %u\n"
             "bitmap_cvg : %0.02f%%\n"
             "unique_crashes : %llu\n"
             "unique_hangs : %llu\n"
             "last_path : %llu\n"
             "last_crash : %llu\n"
             "last_hang : %llu\n"
             "exec_timeout : %u\n"
             "afl_banner : %s\n"
             "afl_version : " VERSION "\n"
             "command_line : %s\n",
             start_time / 1000, get_cur_time() / 1000, getpid(),
             queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
             queued_paths, queued_favored, queued_discovered, queued_imported,
             max_depth, current_entry, pending_favored, pending_not_fuzzed,
             queued_variable, bitmap_cvg, unique_crashes, unique_hangs,
             last_path_time / 1000, last_crash_time / 1000,
             last_hang_time / 1000, exec_tmout, use_banner, orig_cmdline);
             /* ignore errors */

  fclose(f);

}
/* Update the plot file if there is a reason to. */

static void maybe_update_plot_file(double bitmap_cvg, double eps) {

  /* Snapshot of the last values written; a new row is appended only when
     at least one of them changed. */

  static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
  static u64 prev_qc, prev_uc, prev_uh;

  if (prev_qp == queued_paths && prev_pf == pending_favored &&
      prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
      prev_qc == queue_cycle && prev_uc == unique_crashes &&
      prev_uh == unique_hangs && prev_md == max_depth) return;

  prev_qp  = queued_paths;
  prev_pf  = pending_favored;
  prev_pnf = pending_not_fuzzed;
  prev_ce  = current_entry;
  prev_qc  = queue_cycle;
  prev_uc  = unique_crashes;
  prev_uh  = unique_hangs;
  prev_md  = max_depth;

  /* Fields in the file:

     unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
     favored_not_fuzzed, map_coverage, unique_crashes, unique_hangs,
     max_depth, execs_per_sec */

  fprintf(plot_file,
          "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
          get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
          pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
          unique_hangs, max_depth, eps); /* ignore errors */

  fflush(plot_file);

}
/* A helper function for maybe_delete_out_dir(), deleting all prefixed
   files in a directory. Returns non-zero if the directory could not be
   removed afterwards (e.g. unexpected files remained). */

static u8 delete_files(u8* path, u8* prefix) {

  DIR* dir = opendir(path);
  struct dirent* ent;
  u8* target;

  if (!dir) return 0;

  while ((ent = readdir(dir))) {

    /* Skip dotfiles; honor the optional name-prefix filter. */

    if (ent->d_name[0] == '.') continue;
    if (prefix && strncmp(ent->d_name, prefix, strlen(prefix))) continue;

    target = alloc_printf("%s/%s", path, ent->d_name);
    if (unlink(target)) PFATAL("Unable to delete '%s'", target);
    ck_free(target);

  }

  closedir(dir);

  return !!rmdir(path);

}
/* Get the number of runnable processes, with some simple smoothing. */

static double get_runnable_processes(void) {

  /* Static: the smoothed value persists across calls. */

  static double res;

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)

  /* I don't see any portable sysctl or so that would quickly give us the
     number of runnable processes; the 1-minute load average can be a
     semi-decent approximation, though. */

  if (getloadavg(&res, 1) != 1) return 0;

#else

  /* On Linux, /proc/stat is probably the best way; load averages are
     computed in funny ways and sometimes don't reflect extremely short-lived
     processes well. */

  FILE* f = fopen("/proc/stat", "r");
  u8 tmp[1024];
  u32 val = 0;

  if (!f) return 0;

  while (fgets(tmp, sizeof(tmp), f)) {

    if (!strncmp(tmp, "procs_running ", 14) ||
        !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14);

  }

  fclose(f);

  if (!res) {

    /* First sample: take the raw reading. */

    res = val;

  } else {

    /* Exponential moving average over the last AVG_SMOOTHING samples. */

    res = res * (1.0 - 1.0 / AVG_SMOOTHING) +
          ((double)val) * (1.0 / AVG_SMOOTHING);

  }

#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */

  return res;

}
/* Delete the temporary directory used for in-place session resume. */

static void nuke_resume_dir(void) {

  u8* fn;

  /* The .state subdirectories must be emptied before .state itself can be
     removed, and .state before _resume - hence the fixed order below. */

  fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir);
  if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/_resume/.state", out_dir);
  if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/_resume", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  return;

dir_cleanup_failed:

  /* fn is intentionally not freed here - FATAL() terminates the process. */

  FATAL("_resume directory cleanup failed");

}
/* Delete fuzzer output directory if we recognize it as ours, if the fuzzer
   is not currently running, and if the last run time isn't too great. */

static void maybe_delete_out_dir(void) {

  FILE* f;
  u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir);

  /* See if the output directory is locked. If yes, bail out. If not,
     create a lock that will persist for the lifetime of the process
     (this requires leaving the descriptor open).*/

  out_dir_fd = open(out_dir, O_RDONLY);
  if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir);

#ifndef __sun

  if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {

    SAYF("\n" cLRD "[-] " cRST
         "Looks like the job output directory is being actively used by another\n"
         " instance of afl-fuzz. You will need to choose a different %s\n"
         " or stop the other process first.\n",
         sync_id ? "fuzzer ID" : "output location");

    FATAL("Directory '%s' is in use", out_dir);

  }

#endif /* !__sun */

  f = fopen(fn, "r");

  if (f) {

    u64 start_time, last_update;

    if (fscanf(f, "start_time : %llu\n"
                  "last_update : %llu\n", &start_time, &last_update) != 2)
      FATAL("Malformed data in '%s'", fn);

    fclose(f);

    /* Let's see how much work is at stake. */

    if (!in_place_resume && last_update - start_time > OUTPUT_GRACE * 60) {

      SAYF("\n" cLRD "[-] " cRST
           "The job output directory already exists and contains the results of more\n"
           " than %u minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n"
           " automatically delete this data for you.\n\n"
           " If you wish to start a new session, remove or rename the directory manually,\n"
           " or specify a different output location for this job. To resume the old\n"
           " session, put '-' as the input directory in the command line ('-i -') and\n"
           " try again.\n", OUTPUT_GRACE);

      FATAL("At-risk data found in in '%s'", out_dir);

    }

  }

  ck_free(fn);

  /* The idea for in-place resume is pretty simple: we temporarily move the old
     queue/ to a new location that gets deleted once import to the new queue/
     is finished. If _resume/ already exists, the current queue/ may be
     incomplete due to an earlier abort, so we want to use the old _resume/
     dir instead, and we let rename() fail silently. */

  if (in_place_resume) {

    u8* orig_q = alloc_printf("%s/queue", out_dir);

    in_dir = alloc_printf("%s/_resume", out_dir);

    rename(orig_q, in_dir); /* Ignore errors */

    OKF("Output directory exists, will attempt session resume.");

    ck_free(orig_q);

  } else {

    OKF("Output directory exists but deemed OK to reuse.");

  }

  ACTF("Deleting old session data...");

  /* Okay, let's get the ball rolling! First, we need to get rid of the entries
     in <out_dir>/.synced/.../id:*, if any are present. */

  fn = alloc_printf("%s/.synced", out_dir);
  if (delete_files(fn, NULL)) goto dir_cleanup_failed;
  ck_free(fn);

  /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */

  fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/queue/.state/auto_extras", out_dir);
  if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  /* Then, get rid of the .state subdirectory itself (should be empty by now)
     and everything matching <out_dir>/queue/id:*. */

  fn = alloc_printf("%s/queue/.state", out_dir);
  if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/queue", out_dir);
  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */

  if (!in_place_resume) {

    fn = alloc_printf("%s/crashes/README.txt", out_dir);
    unlink(fn); /* Ignore errors */
    ck_free(fn);

  }

  fn = alloc_printf("%s/crashes", out_dir);

  /* Make backup of the crashes directory if it's not empty and if we're
     doing in-place resume. */

  if (in_place_resume && rmdir(fn)) {

    /* rmdir() failing implies the directory is non-empty: rename it to a
       timestamped backup instead of deleting its contents. */

    time_t cur_t = time(0);
    struct tm* t = localtime(&cur_t);

#ifndef SIMPLE_FILES

    u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                           t->tm_hour, t->tm_min, t->tm_sec);

#else

    u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                           t->tm_hour, t->tm_min, t->tm_sec);

#endif /* ^!SIMPLE_FILES */

    rename(fn, nfn); /* Ignore errors. */
    ck_free(nfn);

  }

  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/hangs", out_dir);

  /* Backup hangs, too. */

  if (in_place_resume && rmdir(fn)) {

    time_t cur_t = time(0);
    struct tm* t = localtime(&cur_t);

#ifndef SIMPLE_FILES

    u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                           t->tm_hour, t->tm_min, t->tm_sec);

#else

    u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                           t->tm_hour, t->tm_min, t->tm_sec);

#endif /* ^!SIMPLE_FILES */

    rename(fn, nfn); /* Ignore errors. */
    ck_free(nfn);

  }

  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
  ck_free(fn);

  /* And now, for some finishing touches. */

  fn = alloc_printf("%s/.cur_input", out_dir);
  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  ck_free(fn);

  fn = alloc_printf("%s/fuzz_bitmap", out_dir);
  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  ck_free(fn);

  /* On in-place resume, fuzzer_stats is preserved so that the old timeout
     and queue position can be recovered later. */

  if (!in_place_resume) {

    fn = alloc_printf("%s/fuzzer_stats", out_dir);
    if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
    ck_free(fn);

  }

  fn = alloc_printf("%s/plot_data", out_dir);
  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
  ck_free(fn);

  OKF("Output dir cleanup successful.");

  /* Wow... is that all? If yes, celebrate! */

  return;

dir_cleanup_failed:

  SAYF("\n" cLRD "[-] " cRST
       "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n"
       " some files that shouldn't be there or that couldn't be removed - so it\n"
       " decided to abort! This happened while processing this path:\n\n"
       " %s\n\n"
       " Please examine and manually delete the files, or specify a different\n"
       " output location for the tool.\n", fn);

  FATAL("Output directory cleanup failed");

}
static void check_term_size(void);
/* A spiffy retro stats screen! This is called every stats_update_freq
execve() calls, plus in several other circumstances. */
static void show_stats(void) {
static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
static double avg_exec;
double t_byte_ratio;
u64 cur_ms;
u32 t_bytes, t_bits;
u32 banner_len, banner_pad;
u8 tmp[256];
cur_ms = get_cur_time();
/* If not enough time has passed since last UI update, bail out. */
if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return;
/* Check if we're past the 10 minute mark. */
if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
/* Calculate smoothed exec speed stats. */
if (!last_execs) {
avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
} else {
double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
(cur_ms - last_ms);
/* If there is a dramatic (5x+) jump in speed, reset the indicator
more quickly. */
if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
avg_exec = cur_avg;
avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
cur_avg * (1.0 / AVG_SMOOTHING);
}
last_ms = cur_ms;
last_execs = total_execs;
/* Tell the callers when to contact us (as measured in execs). */
stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
if (!stats_update_freq) stats_update_freq = 1;
/* Do some bitmap stats. */
t_bytes = count_non_255_bytes(virgin_bits);
t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
/* Roughly every minute, update fuzzer stats and save auto tokens. */
if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
last_stats_ms = cur_ms;
write_stats_file(t_byte_ratio, avg_exec);
save_auto();
write_bitmap();
}
/* Every now and then, write plot data. */
if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
last_plot_ms = cur_ms;
maybe_update_plot_file(t_byte_ratio, avg_exec);
}
/* Honor AFL_EXIT_WHEN_DONE. */
if (!dumb_mode && cycles_wo_finds > 20 && !pending_not_fuzzed &&
getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;
/* If we're not on TTY, bail out. */
if (not_on_tty) return;
/* Compute some mildly useful bitmap stats. */
t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);
/* Now, for the visuals... */
if (clear_screen) {
SAYF(TERM_CLEAR CURSOR_HIDE);
clear_screen = 0;
check_term_size();
}
SAYF(TERM_HOME);
if (term_too_small) {
SAYF(cBRI "Your terminal is too small to display the UI.\n"
"Please resize terminal window to at least 80x25.\n" cNOR);
return;
}
/* Let's start by drawing a centered banner. */
banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner);
banner_pad = (80 - banner_len) / 2;
memset(tmp, ' ', banner_pad);
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
" (%s)", crash_mode ? cPIN "peruvian were-rabbit" :
cYEL "american fuzzy lop", use_banner);
SAYF("\n%s\n\n", tmp);
/* "Handy" shortcuts for drawing boxes... */
#define bSTG bSTART cGRA
#define bH2 bH bH
#define bH5 bH2 bH2 bH
#define bH10 bH5 bH5
#define bH20 bH10 bH10
#define bH30 bH20 bH10
#define SP5 " "
#define SP10 SP5 SP5
#define SP20 SP10 SP10
/* Lord, forgive me this. */
SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH2 bHB
bH bSTOP cCYA " overall results " bSTG bH5 bRT "\n");
if (dumb_mode) {
strcpy(tmp, cNOR);
} else {
/* First queue cycle: don't stop now! */
if (queue_cycle == 1) strcpy(tmp, cMGN); else
/* Subsequent cycles, but we're still making finds. */
if (cycles_wo_finds < 3) strcpy(tmp, cYEL); else
/* No finds for a long time and no test cases to try. */
if (cycles_wo_finds > 20 && !pending_not_fuzzed) strcpy(tmp, cLGN);
/* Default: cautiously OK to stop? */
else strcpy(tmp, cLBL);
}
SAYF(bV bSTOP " run time : " cNOR "%-34s " bSTG bV bSTOP
" cycles done : %s%-5s " bSTG bV "\n",
DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
/* We want to warn people about not seeing new paths after a full cycle,
except when resuming fuzzing or running in non-instrumented mode. */
if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
in_bitmap || crash_mode)) {
SAYF(bV bSTOP " last new path : " cNOR "%-34s ",
DTD(cur_ms, last_path_time));
} else {
if (dumb_mode)
SAYF(bV bSTOP " last new path : " cPIN "n/a" cNOR
" (non-instrumented mode) ");
else
SAYF(bV bSTOP " last new path : " cNOR "none yet " cLRD
"(odd, check syntax!) ");
}
SAYF(bSTG bV bSTOP " total paths : " cNOR "%-5s " bSTG bV "\n",
DI(queued_paths));
/* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
limit with a '+' appended to the count. */
sprintf(tmp, "%s%s", DI(unique_crashes),
(unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
SAYF(bV bSTOP " last uniq crash : " cNOR "%-34s " bSTG bV bSTOP
" uniq crashes : %s%-6s " bSTG bV "\n",
DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cNOR,
tmp);
sprintf(tmp, "%s%s", DI(unique_hangs),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF(bV bSTOP " last uniq hang : " cNOR "%-34s " bSTG bV bSTOP
" uniq hangs : " cNOR "%-6s " bSTG bV "\n",
DTD(cur_ms, last_hang_time), tmp);
SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH20 bHB bH bSTOP cCYA
" map coverage " bSTG bH bHT bH20 bH2 bH bVL "\n");
/* This gets funny because we want to print several variable-length variables
together, but then cram them into a fixed-width field - so we need to
put them in a temporary buffer first. */
sprintf(tmp, "%s%s (%0.02f%%)", DI(current_entry),
queue_cur->favored ? "" : "*",
((double)current_entry * 100) / queued_paths);
SAYF(bV bSTOP " now processing : " cNOR "%-17s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(t_bytes), t_byte_ratio);
SAYF(" map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
((t_bytes < 200 && !dumb_mode) ? cPIN : cNOR), tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
((double)cur_skipped_paths * 100) / queued_paths);
SAYF(bV bSTOP " paths timed out : " cNOR "%-17s " bSTG bV, tmp);
sprintf(tmp, "%0.02f bits/tuple",
t_bytes ? (((double)t_bits) / t_bytes) : 0);
SAYF(bSTOP " count coverage : " cNOR "%-21s " bSTG bV "\n", tmp);
SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH20 bX bH bSTOP cCYA
" findings in depth " bSTG bH20 bVL "\n");
sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
((double)queued_favored) * 100 / queued_paths);
/* Yeah... it's still going on... halp? */
SAYF(bV bSTOP " now trying : " cNOR "%-21s " bSTG bV bSTOP
" favored paths : " cNOR "%-22s " bSTG bV "\n", stage_name, tmp);
if (!stage_max) {
sprintf(tmp, "%s/-", DI(stage_cur));
} else {
sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
((double)stage_cur) * 100 / stage_max);
}
SAYF(bV bSTOP " stage execs : " cNOR "%-21s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
((double)queued_with_cov) * 100 / queued_paths);
SAYF(" new edges on : " cNOR "%-22s " bSTG bV "\n", tmp);
sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
(unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
if (crash_mode) {
SAYF(bV bSTOP " total execs : " cNOR "%-21s " bSTG bV bSTOP
" new crashes : %s%-22s " bSTG bV "\n", DI(total_execs),
unique_crashes ? cLRD : cNOR, tmp);
} else {
SAYF(bV bSTOP " total execs : " cNOR "%-21s " bSTG bV bSTOP
" total crashes : %s%-22s " bSTG bV "\n", DI(total_execs),
unique_crashes ? cLRD : cNOR, tmp);
}
/* Show a warning about slow execution. */
if (avg_exec < 100) {
sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
"zzzz..." : "slow!");
SAYF(bV bSTOP " exec speed : " cLRD "%-21s ", tmp);
} else {
sprintf(tmp, "%s/sec", DF(avg_exec));
SAYF(bV bSTOP " exec speed : " cNOR "%-21s ", tmp);
}
sprintf(tmp, "%s (%s%s unique)", DI(total_hangs), DI(unique_hangs),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF (bSTG bV bSTOP " total hangs : " cNOR "%-22s " bSTG bV "\n", tmp);
/* Aaaalmost there... hold on! */
SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bH bHT bH10
bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bH bVL "\n");
if (skip_deterministic) {
strcpy(tmp, "n/a, n/a, n/a");
} else {
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));
}
SAYF(bV bSTOP " bit flips : " cNOR "%-37s " bSTG bV bSTOP " levels : "
cNOR "%-10s " bSTG bV "\n", tmp, DI(max_depth));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));
SAYF(bV bSTOP " byte flips : " cNOR "%-37s " bSTG bV bSTOP " pending : "
cNOR "%-10s " bSTG bV "\n", tmp, DI(pending_not_fuzzed));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));
SAYF(bV bSTOP " arithmetics : " cNOR "%-37s " bSTG bV bSTOP " pend fav : "
cNOR "%-10s " bSTG bV "\n", tmp, DI(pending_favored));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));
SAYF(bV bSTOP " known ints : " cNOR "%-37s " bSTG bV bSTOP " own finds : "
cNOR "%-10s " bSTG bV "\n", tmp, DI(queued_discovered));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));
SAYF(bV bSTOP " dictionary : " cNOR "%-37s " bSTG bV bSTOP
" imported : " cNOR "%-10s " bSTG bV "\n", tmp,
sync_id ? DI(queued_imported) : (u8*)"n/a");
sprintf(tmp, "%s/%s, %s/%s",
DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]));
SAYF(bV bSTOP " havoc : " cNOR "%-37s " bSTG bV bSTOP
" variable : %s%-10s " bSTG bV "\n", tmp, queued_variable ? cLRD : cNOR,
no_var_check ? (u8*)"n/a" : DI(queued_variable));
if (!bytes_trim_out) {
sprintf(tmp, "n/a, ");
} else {
sprintf(tmp, "%0.02f%%/%s, ",
((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
DI(trim_execs));
}
if (!blocks_eff_total) {
u8 tmp2[128];
sprintf(tmp2, "n/a");
strcat(tmp, tmp2);
} else {
u8 tmp2[128];
sprintf(tmp2, "%0.02f%%",
((double)(blocks_eff_total - blocks_eff_select)) * 100 /
blocks_eff_total);
strcat(tmp, tmp2);
}
SAYF(bV bSTOP " trim : " cNOR "%-37s " bSTG bVR bH20 bH2 bH2 bRB "\n"
bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);
/* Provide some CPU utilization stats. */
if (cpu_core_count) {
double cur_runnable = get_runnable_processes();
u32 cur_utilization = cur_runnable * 100 / cpu_core_count;
u8* cpu_color = cCYA;
/* If we could still run one or more processes, use green. */
if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
cpu_color = cLGN;
/* If we're clearly oversubscribed, use red. */
if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST,
cpu_color, cur_utilization < 999 ? cur_utilization : 999);
} else SAYF("\r");
/* Hallelujah! */
fflush(0);
}
/* Display quick statistics at the end of processing the input directory,
   plus a bunch of warnings. Some calibration stuff also ended up here,
   along with several hardcoded constants. Maybe clean up eventually. */

static void show_init_stats(void) {

  struct queue_entry* entry = queue;

  u32 smallest_bits = 0, largest_bits = 0, longest_case = 0;
  u64 fastest_us = 0, slowest_us = 0, mean_us = 0;

  /* Average calibration exec time; guard against a zero cycle count. */

  if (total_cal_cycles) mean_us = total_cal_us / total_cal_cycles;

  /* Single pass over the queue to collect min/max exec time, bitmap
     size, and input length. */

  for (; entry; entry = entry->next) {

    if (!fastest_us || entry->exec_us < fastest_us) fastest_us = entry->exec_us;
    if (entry->exec_us > slowest_us) slowest_us = entry->exec_us;

    if (!smallest_bits || entry->bitmap_size < smallest_bits)
      smallest_bits = entry->bitmap_size;

    if (entry->bitmap_size > largest_bits) largest_bits = entry->bitmap_size;

    if (entry->len > longest_case) longest_case = entry->len;

  }

  SAYF("\n");

  if (mean_us > (qemu_mode ? 50000 : 10000))
    WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.",
          doc_path);

  /* Let's keep things moving with slow binaries. */

  if (mean_us > 50000) havoc_div = 10;     /* 0-19 execs/sec   */
  else if (mean_us > 20000) havoc_div = 5; /* 20-49 execs/sec  */
  else if (mean_us > 10000) havoc_div = 2; /* 50-100 execs/sec */

  if (!resuming_fuzz) {

    if (longest_case > 50 * 1024)
      WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!",
            DMS(longest_case), doc_path);
    else if (longest_case > 10 * 1024)
      WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.",
            DMS(longest_case), doc_path);

    if (useless_at_start && !in_bitmap)
      WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");

    if (queued_paths > 100)
      WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
    else if (queued_paths > 20)
      WARNF("You have lots of input files; try starting small.");

  }

  OKF("Here are some useful stats:\n\n"
      cGRA " Test case count : " cNOR "%u favored, %u variable, %u total\n"
      cGRA " Bitmap range : " cNOR "%u to %u bits (average: %0.02f bits)\n"
      cGRA " Exec timing : " cNOR "%s to %s us (average: %s us)\n",
      queued_favored, queued_variable, queued_paths, smallest_bits,
      largest_bits,
      ((double)total_bitmap_size) /
      (total_bitmap_entries ? total_bitmap_entries : 1),
      DI(fastest_us), DI(slowest_us), DI(mean_us));

  if (!timeout_given) {

    /* Figure out the appropriate timeout. The basic idea is: 5x average or
       1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.

       If the program is slow, the multiplier is lowered to 2x or 3x, because
       random scheduler jitter is less likely to have any impact, and because
       our patience is wearing thin =) */

    if (mean_us > 50000) exec_tmout = mean_us * 2 / 1000;
    else if (mean_us > 10000) exec_tmout = mean_us * 3 / 1000;
    else exec_tmout = mean_us * 5 / 1000;

    exec_tmout = MAX(exec_tmout, slowest_us / 1000);
    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;

    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;

    ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
         exec_tmout);

    timeout_given = 1;

  } else if (timeout_given == 3) {

    ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);

  }

  OKF("All set and ready to roll!");

}
/* Find first power of two greater or equal to val. Saturates at the
   largest representable u32 power of two (0x80000000): in the original
   form, shifting past it wrapped ret to zero, making the loop spin
   forever for val > 0x80000000. Callers pass file lengths well below
   that, but the guard makes the helper safe for any input. */

static u32 next_p2(u32 val) {

  u32 ret = 1;

  while (val > ret) {

    /* Can't go any higher without wrapping to zero. */
    if (ret & 0x80000000) break;

    ret <<= 1;

  }

  return ret;

}
/* Trim all new test cases to save cycles when doing deterministic checks. The
   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
   file size, to keep the stage short and sweet.

   Returns the last fault code observed from run_target() (0 if the input was
   too short to bother with). On a successful trim, q->len and in_buf are
   shrunk in place and the trimmed data is re-written to q->fname. */

static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {

  /* Static scratch: stage banner text, plus a snapshot of trace_bits taken
     right after the last accepted deletion. */
  static u8 tmp[64];
  static u8 clean_trace[MAP_SIZE];

  u8 needs_write = 0, fault = 0;
  u32 trim_exec = 0;
  u32 remove_len;
  u32 len_p2;

  /* Although the trimmer will be less useful when variable behavior is
     detected, it will still work to some extent, so we don't check for
     this. */

  if (q->len < 5) return 0;

  stage_name = tmp;
  bytes_trim_in += q->len;

  /* Select initial chunk len, starting with large steps. */

  len_p2 = next_p2(q->len);

  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

  /* Continue until the number of steps gets too high or the stepover
     gets too small. Each outer iteration halves the chunk size. */

  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {

    /* Start one chunk in, so the very first chunk of the file is never
       deleted at this step size. */
    u32 remove_pos = remove_len;

    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));

    stage_cur = 0;
    stage_max = q->len / remove_len;

    /* Slide a remove_len-sized window over the file, attempting to delete
       each chunk in turn. */

    while (remove_pos < q->len) {

      /* Last chunk may be shorter than remove_len. */
      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
      u32 cksum;

      /* Execute the target on the input minus the candidate chunk. */

      write_with_gap(in_buf, q->len, remove_pos, trim_avail);

      fault = run_target(argv);
      trim_execs++;

      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;

      /* Note that we don't keep track of crashes or hangs here; maybe TODO? */

      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

      /* If the deletion had no impact on the trace, make it permanent. This
         isn't perfect for variable-path inputs, but we're just making a
         best-effort pass, so it's not a big deal if we end up with false
         negatives every now and then. */

      if (cksum == q->exec_cksum) {

        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;
        len_p2 = next_p2(q->len);

        /* Close the gap left by the deleted chunk; remove_pos stays put,
           so the next iteration tests the bytes that slid into its place. */

        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
                move_tail);

        /* Let's save a clean trace, which will be needed by
           update_bitmap_score once we're done with the trimming stuff. */

        if (!needs_write) {

          needs_write = 1;
          memcpy(clean_trace, trace_bits, MAP_SIZE);

        }

      } else remove_pos += remove_len;

      /* Since this can be slow, update the screen every now and then. */
      /* NOTE(review): relies on stats_update_freq being nonzero; show_stats()
         clamps it to >= 1 — confirm it has run before the first trim. */

      if (!(trim_exec++ % stats_update_freq)) show_stats();
      stage_cur++;

    }

    remove_len >>= 1;

  }

  /* If we have made changes to in_buf, we also need to update the on-disk
     version of the test case. */

  if (needs_write) {

    s32 fd;

    unlink(q->fname); /* ignore errors */

    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);

    ck_write(fd, in_buf, q->len, q->fname);
    close(fd);

    /* Restore the trace captured after the last accepted deletion so that
       update_bitmap_score() scores the trimmed input, not the final (failed)
       trial run. */

    memcpy(trace_bits, clean_trace, MAP_SIZE);
    update_bitmap_score(q);

  }

abort_trimming:

  bytes_trim_out += q->len;
  return fault;

}
/* Write a modified test case, run program, process results. Handle
   error conditions, returning 1 if it's time to bail out. This is
   a helper function for fuzz_one(). */

static u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {

  u8 verdict;

  /* Give the user-supplied postprocessor a chance to rewrite - or veto -
     the test case before it reaches the target. */

  if (post_handler) {

    out_buf = post_handler(out_buf, &len);
    if (!len || !out_buf) return 0;

  }

  write_to_testcase(out_buf, len);

  verdict = run_target(argv);

  if (stop_soon) return 1;

  /* Track consecutive hangs; too many in a row and we give up on this
     input altogether. */

  if (verdict != FAULT_HANG) subseq_hangs = 0;
  else if (subseq_hangs++ > HANG_LIMIT) {

    cur_skipped_paths++;
    return 1;

  }

  /* Users can hit us with SIGUSR1 to request the current input
     to be abandoned. */

  if (skip_requested) {

    skip_requested = 0;
    cur_skipped_paths++;
    return 1;

  }

  /* This handles FAULT_ERROR for us: */

  queued_discovered += save_if_interesting(argv, out_buf, len, verdict);

  if (stage_cur + 1 == stage_max || !(stage_cur % stats_update_freq))
    show_stats();

  return 0;

}
/* Helper to choose random block len for block operations in fuzz_one().
   Doesn't return zero, provided that max_len is > 0. */

static u32 choose_block_len(u32 limit) {

  u32 lo, hi, bucket;
  u32 rlim = MIN(queue_cycle, 3);

  /* Until the run has been going for a while, stick to small blocks. */

  if (!run_over10m) rlim = 1;

  bucket = UR(rlim);

  if (bucket == 0) {

    lo = 1;
    hi = HAVOC_BLK_SMALL;

  } else if (bucket == 1) {

    lo = HAVOC_BLK_SMALL;
    hi = HAVOC_BLK_MEDIUM;

  } else {

    lo = HAVOC_BLK_MEDIUM;
    hi = HAVOC_BLK_LARGE;

  }

  /* Keep the lower bound sane for tiny inputs. */

  if (lo >= limit) lo = 1;

  return lo + UR(MIN(hi, limit) - lo + 1);

}
/* Calculate case desirability score to adjust the length of havoc fuzzing.
   A helper function for fuzz_one(). Maybe some of these constants should
   go into config.h.

   Returns a multiplier (percent, 100 = baseline) capped at
   HAVOC_MAX_MULT * 100. As a side effect, decrements q->handicap. */

static u32 calculate_score(struct queue_entry* q) {

  /* Guard both divisions against zero denominators: show_init_stats() does
     the same for total_cal_cycles and total_bitmap_entries, and dividing by
     zero here would be undefined behavior if no calibration has completed. */

  u32 avg_exec_us = total_cal_us / (total_cal_cycles ? total_cal_cycles : 1);
  u32 avg_bitmap_size = total_bitmap_size /
                        (total_bitmap_entries ? total_bitmap_entries : 1);

  u32 perf_score = 100;

  /* Adjust score based on execution speed of this path, compared to the
     global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
     less expensive to fuzz, so we're giving them more air time. */

  if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
  else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
  else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
  else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
  else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
  else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
  else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;

  /* Adjust score based on bitmap size. The working theory is that better
     coverage translates to better targets. Multiplier from 0.25x to 3x. */

  if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
  else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
  else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
  else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
  else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
  else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;

  /* Adjust score based on handicap. Handicap is proportional to how late
     in the game we learned about this path. Latecomers are allowed to run
     for a bit longer until they catch up with the rest. */

  if (q->handicap >= 4) {

    perf_score *= 4;
    q->handicap -= 4;

  } else if (q->handicap) {

    perf_score *= 2;
    q->handicap--;

  }

  /* Final adjustment based on input depth, under the assumption that fuzzing
     deeper test cases is more likely to reveal stuff that can't be
     discovered with traditional fuzzers. */

  switch (q->depth) {

    case 0 ... 3: break;
    case 4 ... 7: perf_score *= 2; break;
    case 8 ... 13: perf_score *= 4; break;
    case 14 ... 25: perf_score *= 6; break;
    default: perf_score *= 8;

  }

  /* Make sure that we don't go over limit. */

  if (perf_score > HAVOC_MAX_MULT * 100) perf_score = HAVOC_MAX_MULT * 100;

  return perf_score;

}
/* Helper function to see if a particular change (xor_val = old ^ new) could
   be a product of deterministic bit flips with the lengths and stepovers
   attempted by afl-fuzz. This is used to avoid dupes in some of the
   deterministic fuzzing operations that follow bit flips. We also
   return 1 if xor_val is zero, which implies that the old and attempted new
   values are identical and the exec would be a waste of time. */

static u8 could_be_bitflip(u32 xor_val) {

  u32 shift;

  if (!xor_val) return 1;

  /* Normalize: shift the lowest set bit down to position 0, remembering
     how far we moved. */

  for (shift = 0; !(xor_val & 1); shift++) xor_val >>= 1;

  /* 1-, 2-, and 4-bit patterns are OK anywhere. */

  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

  /* 8-, 16-, and 32-bit patterns are OK only if shift factor is
     divisible by 8, since that's the stepover for these ops. */

  if (shift % 8) return 0;

  return (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff);

}
/* Helper function to see if a particular value is reachable through
   arithmetic operations. Used for similar purposes. */

static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {

  u32 i, changed = 0, from = 0, to = 0;

  if (old_val == new_val) return 1;

  /* See if one-byte adjustments to any byte could produce this result. */

  for (i = 0; i < blen; i++) {

    u8 a = old_val >> (8 * i),
       b = new_val >> (8 * i);

    if (a != b) { changed++; from = a; to = b; }

  }

  /* Exactly one byte differs and the delta fits in ARITH_MAX either way?
     Then a byte-level add/sub could have produced it. */

  if (changed == 1 &&
      ((u8)(from - to) <= ARITH_MAX || (u8)(to - from) <= ARITH_MAX))
    return 1;

  if (blen == 1) return 0;

  /* See if two-byte adjustments to any byte would produce this result. */

  changed = 0;

  for (i = 0; i < blen / 2; i++) {

    u16 a = old_val >> (16 * i),
        b = new_val >> (16 * i);

    if (a != b) { changed++; from = a; to = b; }

  }

  /* Exactly one word differs: test the delta both in native order and
     byte-swapped, since the fuzzer does big-endian arithmetic too. */

  if (changed == 1) {

    if ((u16)(from - to) <= ARITH_MAX ||
        (u16)(to - from) <= ARITH_MAX) return 1;

    from = SWAP16(from); to = SWAP16(to);

    if ((u16)(from - to) <= ARITH_MAX ||
        (u16)(to - from) <= ARITH_MAX) return 1;

  }

  /* Finally, let's do the same thing for dwords. */

  if (blen == 4) {

    if ((u32)(old_val - new_val) <= ARITH_MAX ||
        (u32)(new_val - old_val) <= ARITH_MAX) return 1;

    new_val = SWAP32(new_val);
    old_val = SWAP32(old_val);

    if ((u32)(old_val - new_val) <= ARITH_MAX ||
        (u32)(new_val - old_val) <= ARITH_MAX) return 1;

  }

  return 0;

}
/* Last but not least, a similar helper to see if insertion of an
interesting integer is redundant given the insertions done for
shorter blen. The last param (check_le) is set if the caller
already executed LE insertion for current blen and wants to see
if BE variant passed in new_v
gitextract_kd351qrk/ ├── Makefile ├── afl-as.c ├── afl-as.h ├── afl-cmin ├── afl-fuzz.c ├── afl-gcc.c ├── afl-gotcpu.c ├── afl-plot ├── afl-showmap.c ├── afl-tmin.c ├── afl-whatsup ├── alloc-inl.h ├── config.h ├── debug.h ├── docs/ │ ├── COPYING │ ├── ChangeLog │ ├── INSTALL │ ├── QuickStartGuide.txt │ ├── README │ ├── env_variables.txt │ ├── historical_notes.txt │ ├── notes_for_asan.txt │ ├── parallel_fuzzing.txt │ ├── perf_tips.txt │ ├── sister_projects.txt │ ├── status_screen.txt │ ├── technical_details.txt │ └── vuln_samples/ │ ├── bash-cmd-exec.var │ ├── bash-uninit-mem.var │ ├── file-fpu-exception.elf │ ├── jxrlib-crash.jxr │ ├── jxrlib-crash2.jxr │ ├── jxrlib-crash3.jxr │ ├── jxrlib-crash4.jxr │ ├── lesspipe-cpio-bad-write.cpio │ ├── libtiff-bad-write.tif │ ├── libtiff-uninit-mem.tif │ ├── libtiff-uninit-mem2.tif │ ├── libtiff-uninit-mem3.tif │ ├── libtiff-uninit-mem4.tif │ ├── libxml2-bad-read.xml │ ├── msie-jxr-mem-leak.jxr │ ├── msie-tiff-mem-leak.tif │ ├── openssl-null-ptr.der │ ├── openssl-null-ptr2.der │ ├── sqlite-bad-free.sql │ ├── sqlite-bad-ptr.sql │ ├── sqlite-bad-ptr2.sql │ ├── sqlite-bad-ptr3.sql │ ├── sqlite-heap-overflow.sql │ ├── sqlite-heap-overwrite.sql │ ├── sqlite-negative-memset.sql │ ├── sqlite-null-ptr1.sql │ ├── sqlite-null-ptr10.sql │ ├── sqlite-null-ptr11.sql │ ├── sqlite-null-ptr12.sql │ ├── sqlite-null-ptr13.sql │ ├── sqlite-null-ptr14.sql │ ├── sqlite-null-ptr15.sql │ ├── sqlite-null-ptr2.sql │ ├── sqlite-null-ptr3.sql │ ├── sqlite-null-ptr4.sql │ ├── sqlite-null-ptr5.sql │ ├── sqlite-null-ptr6.sql │ ├── sqlite-null-ptr7.sql │ ├── sqlite-null-ptr8.sql │ ├── sqlite-null-ptr9.sql │ ├── sqlite-oob-read.sql │ ├── sqlite-oob-write.sql │ ├── sqlite-stack-buf-overflow.sql │ ├── sqlite-stack-exhaustion.sql │ ├── sqlite-unint-mem.sql │ ├── sqlite-use-after-free.sql │ ├── strings-bfd-badptr.elf │ ├── strings-bfd-badptr2.elf │ ├── strings-stack-overflow │ ├── strings-unchecked-ctr.elf │ ├── tcpdump-arp-crash.pcap │ ├── 
tcpdump-ppp-crash.pcap │ └── unrtf-arbitrary-read.rtf ├── experimental/ │ ├── README.experiments │ ├── argv_fuzzing/ │ │ └── argv-fuzz-inl.h │ ├── asan_cgroups/ │ │ └── limit_memory.sh │ ├── bash_shellshock/ │ │ └── shellshock-fuzz.diff │ ├── canvas_harness/ │ │ └── canvas_harness.html │ ├── clang_asm_normalize/ │ │ └── as │ ├── crash_triage/ │ │ └── triage_crashes.sh │ ├── distributed_fuzzing/ │ │ └── sync_script.sh │ ├── instrumented_cmp/ │ │ └── instrumented_cmp.c │ ├── libpng_no_checksum/ │ │ └── libpng-nocrc.patch │ ├── persistent_demo/ │ │ └── persistent_demo.c │ └── post_library/ │ ├── post_library.so.c │ └── post_library_png.so.c ├── hash.h ├── llvm_mode/ │ ├── Makefile │ ├── README.llvm │ ├── afl-clang-fast.c │ ├── afl-llvm-pass.so.cc │ └── afl-llvm-rt.o.c ├── qemu_mode/ │ ├── README.qemu │ ├── build_qemu_support.sh │ └── patches/ │ ├── afl-qemu-cpu-inl.h │ ├── cpu-exec.diff │ ├── elfload.diff │ ├── syscall.diff │ └── translate-all.diff ├── test-instr.c ├── testcases/ │ ├── README.testcases │ ├── _extras/ │ │ ├── gif.dict │ │ ├── html_tags.dict │ │ ├── jpeg.dict │ │ ├── js.dict │ │ ├── pdf.dict │ │ ├── png.dict │ │ ├── sql.dict │ │ ├── tiff.dict │ │ ├── webp.dict │ │ └── xml.dict │ ├── archives/ │ │ ├── common/ │ │ │ ├── ar/ │ │ │ │ └── small_archive.a │ │ │ ├── bzip2/ │ │ │ │ └── small_archive.bz2 │ │ │ ├── cab/ │ │ │ │ └── small_archive.cab │ │ │ ├── compress/ │ │ │ │ └── small_archive.Z │ │ │ ├── cpio/ │ │ │ │ └── small_archive.cpio │ │ │ ├── lzo/ │ │ │ │ └── small_archive.lzo │ │ │ └── xz/ │ │ │ └── small_archive.xz │ │ └── exotic/ │ │ ├── arj/ │ │ │ └── small_archive.arj │ │ ├── lha/ │ │ │ └── small_archive.lha │ │ ├── lrzip/ │ │ │ └── small_archive.lrz │ │ ├── lzip/ │ │ │ └── small_archive.lz │ │ ├── lzma/ │ │ │ └── small_archive.lzma │ │ ├── rzip/ │ │ │ └── small_archive.rz │ │ └── zoo/ │ │ └── small_archive.zoo │ ├── images/ │ │ ├── jp2/ │ │ │ └── not_kitty.jp2 │ │ ├── jxr/ │ │ │ └── not_kitty.jxr │ │ └── tiff/ │ │ └── not_kitty.tiff │ └── others/ 
│ ├── elf/ │ │ └── small_exec.elf │ ├── js/ │ │ └── small_script.js │ ├── pcap/ │ │ └── small_capture.pcap │ ├── rtf/ │ │ └── small_document.rtf │ ├── sql/ │ │ └── simple_queries.sql │ ├── text/ │ │ └── hello_world.txt │ └── xml/ │ └── small_document.xml └── types.h
SYMBOL INDEX (193 symbols across 23 files)
FILE: afl-as.c
function edit_params (line 87) | static void edit_params(int argc, char** argv) {
function main (line 467) | int main(int argc, char** argv) {
FILE: afl-fuzz.c
type addrinfo (line 196) | struct addrinfo
type sockaddr_storage (line 199) | struct sockaddr_storage
type sockaddr_storage (line 200) | struct sockaddr_storage
type sockaddr_storage (line 201) | struct sockaddr_storage
type timespec (line 214) | struct timespec
type queue_entry (line 217) | struct queue_entry {
type queue_entry (line 246) | struct queue_entry
type queue_entry (line 251) | struct queue_entry
type extra_data (line 254) | struct extra_data {
type extra_data (line 260) | struct extra_data
type extra_data (line 263) | struct extra_data
function u64 (line 318) | static u64 get_cur_time(void) {
function u64 (line 332) | static u64 get_cur_time_us(void) {
function u32 (line 347) | static inline u32 UR(u32 limit) {
function locate_diffs (line 370) | static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* l...
function u8 (line 401) | static u8* DI(u64 val) {
function u8 (line 458) | static u8* DF(double val) {
function u8 (line 479) | static u8* DMS(u64 val) {
function u8 (line 530) | static u8* DTD(u64 cur_ms, u64 event_ms) {
function mark_as_det_done (line 555) | static void mark_as_det_done(struct queue_entry* q) {
function mark_as_variable (line 576) | static void mark_as_variable(struct queue_entry* q) {
function mark_as_redundant (line 602) | static void mark_as_redundant(struct queue_entry* q, u8 state) {
function add_to_queue (line 633) | static void add_to_queue(u8* fname, u32 len, u8 passed_det) {
function destroy_queue (line 668) | static void destroy_queue(void) {
function write_bitmap (line 689) | static void write_bitmap(void) {
function read_bitmap (line 712) | static void read_bitmap(u8* fname) {
function u8 (line 736) | static inline u8 has_new_bits(u8* virgin_map) {
function u32 (line 823) | static u32 count_bits(u8* mem) {
function u32 (line 856) | static u32 count_bytes(u8* mem) {
function u32 (line 882) | static u32 count_non_255_bytes(u8* mem) {
function simplify_trace (line 932) | static void simplify_trace(u64* mem) {
function simplify_trace (line 963) | static void simplify_trace(u32* mem) {
function classify_counts (line 1007) | static inline void classify_counts(u64* mem) {
function classify_counts (line 1038) | static inline void classify_counts(u32* mem) {
function remove_shm (line 1068) | static void remove_shm(void) {
function minimize_bits (line 1079) | static void minimize_bits(u8* dst, u8* src) {
function update_bitmap_score (line 1103) | static void update_bitmap_score(struct queue_entry* q) {
function cull_queue (line 1154) | static void cull_queue(void) {
function setup_shm (line 1209) | static void setup_shm(void) {
function setup_post (line 1244) | static void setup_post(void) {
function read_testcases (line 1272) | static void read_testcases(void) {
function compare_extras_len (line 1368) | static int compare_extras_len(const void* p1, const void* p2) {
function compare_extras_use_d (line 1375) | static int compare_extras_use_d(const void* p1, const void* p2) {
function load_extras_file (line 1385) | static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
function load_extras (line 1522) | static void load_extras(u8* dir) {
function u8 (line 1624) | static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
function maybe_add_auto (line 1634) | static void maybe_add_auto(u8* mem, u32 len) {
function save_auto (line 1740) | static void save_auto(void) {
function load_auto (line 1768) | static void load_auto(void) {
function destroy_extras (line 1811) | static void destroy_extras(void) {
function network_setup_listener (line 1850) | void network_setup_listener(void) {
function network_listen (line 1956) | int network_listen(void) {
function network_send (line 2106) | int network_send(void) {
function init_forkserver (line 2341) | static void init_forkserver(char** argv) {
function u8 (line 2622) | static u8 run_target(char** argv) {
function write_to_testcase (line 2854) | static void write_to_testcase(void* mem, u32 len) {
function write_with_gap (line 2882) | static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
function u8 (line 2917) | static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
function check_map_coverage (line 3037) | static void check_map_coverage(void) {
function perform_dry_run (line 3054) | static void perform_dry_run(char** argv) {
function link_or_copy (line 3258) | static void link_or_copy(u8* old_path, u8* new_path) {
function pivot_inputs (line 3291) | static void pivot_inputs(void) {
function u8 (line 3384) | static u8* describe_op(u8 hnb) {
function write_crash_readme (line 3425) | static void write_crash_readme(void) {
function u8 (line 3474) | static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
function u32 (line 3636) | static u32 find_start_position(void) {
function find_timeout (line 3671) | static void find_timeout(void) {
function write_stats_file (line 3706) | static void write_stats_file(double bitmap_cvg, double eps) {
function maybe_update_plot_file (line 3776) | static void maybe_update_plot_file(double bitmap_cvg, double eps) {
function u8 (line 3816) | static u8 delete_files(u8* path, u8* prefix) {
function get_runnable_processes (line 3847) | static double get_runnable_processes(void) {
function nuke_resume_dir (line 3900) | static void nuke_resume_dir(void) {
function maybe_delete_out_dir (line 3940) | static void maybe_delete_out_dir(void) {
function show_stats (line 4185) | static void show_stats(void) {
function show_init_stats (line 4624) | static void show_init_stats(void) {
function u32 (line 4724) | static u32 next_p2(u32 val) {
function u8 (line 4737) | static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
function u8 (line 4863) | static u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
function u32 (line 4915) | static u32 choose_block_len(u32 limit) {
function u32 (line 4949) | static u32 calculate_score(struct queue_entry* q) {
function u8 (line 5023) | static u8 could_be_bitflip(u32 xor_val) {
function u8 (line 5053) | static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
function u8 (line 5134) | static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_...
function u8 (line 5206) | static u8 fuzz_one(char** argv) {
function sync_fuzzers (line 6863) | static void sync_fuzzers(char** argv) {
function handle_stop_sig (line 6995) | static void handle_stop_sig(int sig) {
function handle_skipreq (line 7007) | static void handle_skipreq(int sig) {
function handle_timeout (line 7015) | static void handle_timeout(int sig) {
function check_binary (line 7036) | static void check_binary(u8* fname) {
function fix_up_banner (line 7202) | static void fix_up_banner(u8* name) {
function check_if_tty (line 7232) | static void check_if_tty(void) {
function check_term_size (line 7251) | static void check_term_size(void) {
function usage (line 7267) | static void usage(u8* argv0) {
function setup_dirs_fds (line 7330) | static void setup_dirs_fds(void) {
function setup_stdio_file (line 7450) | static void setup_stdio_file(void) {
function check_crash_handling (line 7467) | static void check_crash_handling(void) {
function check_cpu_governor (line 7533) | static void check_cpu_governor(void) {
function get_core_count (line 7589) | static void get_core_count(void) {
function fix_up_sync (line 7665) | static void fix_up_sync(void) {
function handle_resize (line 7707) | static void handle_resize(int sig) {
function check_asan_opts (line 7714) | static void check_asan_opts(void) {
function detect_file_args (line 7731) | static void detect_file_args(char** argv) {
function setup_signal_handlers (line 7780) | static void setup_signal_handlers(void) {
function save_cmdline (line 7892) | static void save_cmdline(u32 argc, char** argv) {
function main (line 7922) | int main(int argc, char** argv) {
FILE: afl-gcc.c
function find_as (line 56) | static void find_as(u8* argv0) {
function edit_params (line 110) | static void edit_params(u32 argc, char** argv) {
function main (line 275) | int main(int argc, char** argv) {
FILE: afl-gotcpu.c
function u64 (line 45) | static u64 get_cur_time_us(void) {
function u64 (line 59) | static u64 get_cpu_usage_us(void) {
function main (line 73) | int main(int argc, char** argv) {
FILE: afl-showmap.c
function classify_counts (line 94) | static void classify_counts(u8* mem) {
function remove_shm (line 119) | static void remove_shm(void) {
function setup_shm (line 128) | static void setup_shm(void) {
function u32 (line 152) | static u32 write_results(void) {
function handle_timeout (line 202) | static void handle_timeout(int sig) {
function run_target (line 212) | static void run_target(char** argv) {
function handle_stop_sig (line 320) | static void handle_stop_sig(int sig) {
function set_up_environment (line 331) | static void set_up_environment(void) {
function setup_signal_handlers (line 345) | static void setup_signal_handlers(void) {
function detect_file_args (line 372) | static void detect_file_args(char** argv) {
function show_banner (line 416) | static void show_banner(void) {
function usage (line 424) | static void usage(u8* argv0) {
function find_binary (line 457) | static void find_binary(u8* fname) {
function main (line 569) | int main(int argc, char** argv) {
FILE: afl-tmin.c
function classify_counts (line 102) | static void classify_counts(u8* mem) {
function u8 (line 127) | static inline u8 anything_set(void) {
function remove_shm (line 142) | static void remove_shm(void) {
function setup_shm (line 152) | static void setup_shm(void) {
function read_initial_file (line 177) | static void read_initial_file(void) {
function s32 (line 204) | static s32 write_to_file(u8* path, u8* mem, u32 len) {
function handle_timeout (line 225) | static void handle_timeout(int sig) {
function u8 (line 236) | static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
function u32 (line 381) | static u32 next_p2(u32 val) {
function minimize (line 392) | static void minimize(char** argv) {
function handle_stop_sig (line 634) | static void handle_stop_sig(int sig) {
function set_up_environment (line 645) | static void set_up_environment(void) {
function setup_signal_handlers (line 692) | static void setup_signal_handlers(void) {
function detect_file_args (line 719) | static void detect_file_args(char** argv) {
function usage (line 761) | static void usage(u8* argv0) {
function find_binary (line 793) | static void find_binary(u8* fname) {
function main (line 905) | int main(int argc, char** argv) {
FILE: alloc-inl.h
function DFL_ck_free (line 139) | static inline void DFL_ck_free(void* mem) {
function u8 (line 258) | static inline u8* DFL_ck_strdup(u8* str) {
function u8 (line 309) | static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
type TRK_obj (line 358) | struct TRK_obj {
type TRK_obj (line 366) | struct TRK_obj
type TRK_obj (line 373) | struct TRK_obj
function TRK_alloc_buf (line 387) | static inline void TRK_alloc_buf(void* ptr, const char* file, const char...
function TRK_free_buf (line 427) | static inline void TRK_free_buf(void* ptr, const char* file, const char*...
function TRK_report (line 455) | static inline void TRK_report(void) {
function TRK_ck_free (line 534) | static inline void TRK_ck_free(void* ptr, const char* file,
FILE: docs/vuln_samples/sqlite-bad-free.sql
type t0 (line 1) | create table t0(o CHar(0)CHECK(0&O>O))
FILE: docs/vuln_samples/sqlite-bad-ptr2.sql
type t1 (line 1) | CREATE TABLE t1("""0"PRIMARY KEy REFERENCES t1 ON DELETE SET NULL)
FILE: docs/vuln_samples/sqlite-null-ptr1.sql
type t0 (line 1) | create table t0(t)
FILE: docs/vuln_samples/sqlite-null-ptr8.sql
type p (line 1) | CREATE TABLE p(a UNIQUE,PRIMARY KEY('a'))
FILE: docs/vuln_samples/sqlite-null-ptr9.sql
type t0 (line 1) | CREATE TABLE t0(z)
FILE: docs/vuln_samples/sqlite-use-after-free.sql
type t (line 1) | create table t(s)
FILE: experimental/instrumented_cmp/instrumented_cmp.c
function my_memcmp (line 34) | inline int my_memcmp(char* ptr1, char* ptr2, int len) {
function main (line 47) | int main(int argc, char** argv) {
FILE: experimental/persistent_demo/persistent_demo.c
function main (line 34) | int main(int argc, char** argv) {
FILE: hash.h
function u32 (line 26) | static inline u32 hash32(const void* key, u32 len, u32 seed) {
function u32 (line 61) | static inline u32 hash32(const void* key, u32 len, u32 seed) {
FILE: llvm_mode/afl-clang-fast.c
function find_obj (line 43) | static void find_obj(u8* argv0) {
function edit_params (line 97) | static void edit_params(u32 argc, char** argv) {
function main (line 239) | int main(int argc, char** argv) {
FILE: llvm_mode/afl-llvm-pass.so.cc
class AFLCoverage (line 43) | class AFLCoverage : public ModulePass {
method AFLCoverage (line 48) | AFLCoverage() : ModulePass(ID) { }
function registerAFLPass (line 169) | static void registerAFLPass(const PassManagerBuilder &,
FILE: llvm_mode/afl-llvm-rt.o.c
function __afl_map_shm (line 52) | static void __afl_map_shm(void) {
function __afl_start_forkserver (line 82) | static void __afl_start_forkserver(void) {
function __afl_persistent_loop (line 163) | int __afl_persistent_loop(unsigned int max_cnt) {
function __afl_manual_init (line 189) | void __afl_manual_init(void) {
function __afl_auto_init (line 206) | __attribute__((constructor(0))) void __afl_auto_init(void) {
FILE: qemu_mode/patches/afl-qemu-cpu-inl.h
type afl_tsl (line 96) | struct afl_tsl {
function afl_setup (line 110) | static void afl_setup(void) {
function afl_forkserver (line 157) | static void afl_forkserver(CPUArchState *env) {
function afl_maybe_log (line 224) | static inline void afl_maybe_log(abi_ulong cur_loc) {
function afl_request_tsl (line 259) | static void afl_request_tsl(target_ulong pc, target_ulong cb, uint64_t f...
function afl_wait_tsl (line 278) | static void afl_wait_tsl(CPUArchState *env, int fd) {
FILE: test-instr.c
function main (line 21) | int main(int argc, char** argv) {
FILE: testcases/others/sql/simple_queries.sql
type t1 (line 1) | create table t1(one smallint)
FILE: types.h
type u8 (line 23) | typedef uint8_t u8;
type u16 (line 24) | typedef uint16_t u16;
type u32 (line 25) | typedef uint32_t u32;
type u64 (line 44) | typedef unsigned long long u64;
type u64 (line 46) | typedef uint64_t u64;
type s8 (line 49) | typedef int8_t s8;
type s16 (line 50) | typedef int16_t s16;
type s32 (line 51) | typedef int32_t s32;
type s64 (line 52) | typedef int64_t s64;
Condensed preview — 143 files, each showing path, character count, and a content snippet. Download the .json file or copy it to the clipboard to get the full structured content (735K chars).
[
{
"path": "Makefile",
"chars": 6001,
"preview": "#\n# american fuzzy lop - makefile\n# -----------------------------\n#\n# Written and maintained by Michal Zalewski <lcamtuf"
},
{
"path": "afl-as.c",
"chars": 15127,
"preview": "/*\n american fuzzy lop - wrapper for GNU as\n ---------------------------------------\n\n Written and maintained by M"
},
{
"path": "afl-as.h",
"chars": 21058,
"preview": "/*\n american fuzzy lop - injectable parts\n -------------------------------------\n\n Written and maintained by Micha"
},
{
"path": "afl-cmin",
"chars": 11047,
"preview": "#!/usr/bin/env bash\n#\n# american fuzzy lop - corpus minimization tool\n# ---------------------------------------------\n#\n"
},
{
"path": "afl-fuzz.c",
"chars": 230095,
"preview": "/*\n american fuzzy lop - fuzzer code\n --------------------------------\n\n Written and maintained by Michal Zalewski"
},
{
"path": "afl-gcc.c",
"chars": 7625,
"preview": "/*\n american fuzzy lop - wrapper for GCC and clang\n ----------------------------------------------\n\n Written and m"
},
{
"path": "afl-gotcpu.c",
"chars": 3185,
"preview": "/*\n american fuzzy lop - free CPU gizmo\n -----------------------------------\n\n Written and maintained by Michal Za"
},
{
"path": "afl-plot",
"chars": 4857,
"preview": "#!/bin/sh\n#\n# american fuzzy lop - Advanced Persistent Graphing\n# -------------------------------------------------\n#\n# "
},
{
"path": "afl-showmap.c",
"chars": 15081,
"preview": "/*\n american fuzzy lop - map display utility\n ----------------------------------------\n\n Written and maintained by"
},
{
"path": "afl-tmin.c",
"chars": 23122,
"preview": "/*\n american fuzzy lop - test case minimizer\n ----------------------------------------\n\n Written and maintained by"
},
{
"path": "afl-whatsup",
"chars": 3629,
"preview": "#!/bin/sh\n#\n# american fuzzy lop - status check tool\n# --------------------------------------\n#\n# Written and maintained"
},
{
"path": "alloc-inl.h",
"chars": 12565,
"preview": "/*\n american fuzzy lop - error-checking, memory-zeroing alloc routines\n --------------------------------------------"
},
{
"path": "config.h",
"chars": 10937,
"preview": "/*\n american fuzzy lop - vaguely configurable bits\n ----------------------------------------------\n\n Written and m"
},
{
"path": "debug.h",
"chars": 5774,
"preview": "/*\n american fuzzy lop - debug / error handling macros\n --------------------------------------------------\n\n Writt"
},
{
"path": "docs/COPYING",
"chars": 11358,
"preview": "\n Apache License\n Version 2.0, January 2004\n "
},
{
"path": "docs/ChangeLog",
"chars": 58255,
"preview": "=========\nChangeLog\n=========\n\n This is the list of all noteworthy changes made in every public release of\n the tool. "
},
{
"path": "docs/INSTALL",
"chars": 6491,
"preview": "=========================\nInstallation instructions\n=========================\n\n This document provides basic installati"
},
{
"path": "docs/QuickStartGuide.txt",
"chars": 1980,
"preview": "=====================\nAFL quick start guide\n=====================\n\nYou should read docs/README. It's pretty short. If yo"
},
{
"path": "docs/README",
"chars": 27309,
"preview": "==================\namerican fuzzy lop\n==================\n\n Written and maintained by Michal Zalewski <lcamtuf@google.co"
},
{
"path": "docs/env_variables.txt",
"chars": 8935,
"preview": "=======================\nEnvironmental variables\n=======================\n\n This document discusses the environment varia"
},
{
"path": "docs/historical_notes.txt",
"chars": 7739,
"preview": "================\nHistorical notes\n================\n\n This doc talks about the rationale of some of the high-level desig"
},
{
"path": "docs/notes_for_asan.txt",
"chars": 6059,
"preview": "==================================\nNotes for using ASAN with afl-fuzz\n==================================\n\n This file di"
},
{
"path": "docs/parallel_fuzzing.txt",
"chars": 8972,
"preview": "=========================\nTips for parallel fuzzing\n=========================\n\n This document talks about synchronizing"
},
{
"path": "docs/perf_tips.txt",
"chars": 8823,
"preview": "=================================\nTips for performance optimization\n=================================\n\n This file provi"
},
{
"path": "docs/sister_projects.txt",
"chars": 6701,
"preview": "===============\nSister projects\n===============\n\n This doc lists some of the projects that are inspired by, derived fro"
},
{
"path": "docs/status_screen.txt",
"chars": 17950,
"preview": "===============================\nUnderstanding the status screen\n===============================\n\n This document provide"
},
{
"path": "docs/technical_details.txt",
"chars": 22368,
"preview": "===================================\nTechnical \"whitepaper\" for afl-fuzz\n===================================\n\n This docu"
},
{
"path": "docs/vuln_samples/bash-cmd-exec.var",
"chars": 28,
"preview": "() { _; } >_[$($())] { id; }"
},
{
"path": "docs/vuln_samples/bash-uninit-mem.var",
"chars": 34,
"preview": "() { x() { _; }; x() { _; } <<a; }"
},
{
"path": "docs/vuln_samples/libxml2-bad-read.xml",
"chars": 72,
"preview": "<!DOCTYPEd[<!ENTITY\nS\t\"\"><!ENTITY %\nN\t\"<!ELEMENT<![INCLUDE0\"<!ENTITYL%N;"
},
{
"path": "docs/vuln_samples/sqlite-bad-free.sql",
"chars": 84,
"preview": "create table t0(o CHar(0)CHECK(0&O>O));insert into t0\nselect randomblob(0)-trim(0);\n"
},
{
"path": "docs/vuln_samples/sqlite-bad-ptr.sql",
"chars": 52,
"preview": "SELECT 0 UNION SELECT 0 ORDER BY 1 COLLATE\"\"\"\"\"\"\"\";\n"
},
{
"path": "docs/vuln_samples/sqlite-bad-ptr2.sql",
"chars": 116,
"preview": "PRAGMA foreign_keys=1;CREATE TABLE t1(\"\"\"0\"PRIMARY KEy REFERENCES t1 ON DELETE SET NULL);REPLACE INTO t1 SELECT(0);\n"
},
{
"path": "docs/vuln_samples/sqlite-heap-overflow.sql",
"chars": 196,
"preview": "DROP TABLE IF EXISTS t;CREATE VIRTUAL TABLE t0 USING fts4();insert into t0 select zeroblob(0);SAVEPOINT O;insert into t0"
},
{
"path": "docs/vuln_samples/sqlite-heap-overwrite.sql",
"chars": 5045,
"preview": "ATTACH \"file:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
},
{
"path": "docs/vuln_samples/sqlite-negative-memset.sql",
"chars": 531,
"preview": "SELECT*from(select\"\",zeroblob(0),zeroblob(1E9),zeroblob(0),zeroblob(150000000),zeroblob(0),zeroblob(0),zeroblob(0),zerob"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr1.sql",
"chars": 53,
"preview": "create table t0(t);insert into t0\nselect strftime();\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr10.sql",
"chars": 29,
"preview": "SELECT fts3_tokenizer(@0());\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr11.sql",
"chars": 28,
"preview": "select''like''like''like#0;\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr12.sql",
"chars": 69,
"preview": "PRAGMA e;select lower(0);select lower(0)\"a\",\"\"GROUP BY a ORDER BY a;\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr13.sql",
"chars": 70,
"preview": "WITH x AS(SELECT*FROM t)SELECT\"\"EXCEPT SELECT 0 ORDER BY 0 COLLATE\"\";\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr14.sql",
"chars": 286,
"preview": "CREATE VIRTUAL TABLE x USING fts4();VALUES(0,0),(0,0),(0,0),(0,0);PRAGMA writable_schema=ON;UPDATE sqlite_master SET sql"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr15.sql",
"chars": 172,
"preview": "CREATE VIRTUAL TABLE t4 USING fts4(0,b,c,notindexed=0);INSERT INTO t4 VALUES('','','0');BEGIN;INSERT INTO t4 VALUES('','"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr2.sql",
"chars": 32,
"preview": "DETACH(select group_concat(q));\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr3.sql",
"chars": 27,
"preview": "select(select strftime());\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr4.sql",
"chars": 19,
"preview": "select n()AND+#00;\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr5.sql",
"chars": 27,
"preview": "select e.*,0 from(s,(L))e;\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr6.sql",
"chars": 54,
"preview": "PRAGMA encoding='UTF16';CREATE VIRTUAL TABLE USING s;"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr7.sql",
"chars": 46,
"preview": "CREATE VIRTUAL TABLE t USING fts4(tokenize=);\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr8.sql",
"chars": 56,
"preview": "CREATE TABLE p(a UNIQUE,PRIMARY KEY('a'))WITHOUT rowid;\n"
},
{
"path": "docs/vuln_samples/sqlite-null-ptr9.sql",
"chars": 85,
"preview": "CREATE TABLE t0(z);WITH d(x)AS(SELECT*UNION SELECT 0)INSERT INTO t0 SELECT 0 FROM d;\n"
},
{
"path": "docs/vuln_samples/sqlite-oob-read.sql",
"chars": 67,
"preview": "create table t0( DEFAULT(0=0)NOT/**/NULL);REPLACE into t0 select'';"
},
{
"path": "docs/vuln_samples/sqlite-oob-write.sql",
"chars": 219,
"preview": "CREATE VIRTUAL TABLE t0 USING fts4(x,order=DESC);\nINSERT INTO t0(docid,x)VALUES(-1E0,'0(o');\nINSERT INTO t0 VALUES('');\n"
},
{
"path": "docs/vuln_samples/sqlite-stack-buf-overflow.sql",
"chars": 114,
"preview": "SELECT printf('%*.*f',90000||006000000&6600000000,00000000000000000909000000000000.0000000000000000)\"\"WHERE\"\">\"\";\n"
},
{
"path": "docs/vuln_samples/sqlite-stack-exhaustion.sql",
"chars": 48,
"preview": "CREATE VIRTUAL TABLE t0 USING fts4(content=t0);\n"
},
{
"path": "docs/vuln_samples/sqlite-unint-mem.sql",
"chars": 70,
"preview": "REATE VIRTUAL TABLE t0 USING fts4(prefix=0);INSERT INTO t0 VALUES(0);\n"
},
{
"path": "docs/vuln_samples/sqlite-use-after-free.sql",
"chars": 224,
"preview": "create table t(s);PRAGMA writable_schema=ON;UPDATE sqlite_master SET sql='ANALYZE;CREATE VIRTUAL TABLE t USING fts3;DROP"
},
{
"path": "docs/vuln_samples/strings-stack-overflow",
"chars": 14,
"preview": "$$@$$$@$o\nSo\nS"
},
{
"path": "experimental/README.experiments",
"chars": 2001,
"preview": "Here's a quick overview of the stuff you can find in this directory:\n\n - argv_fuzzing - a simple wrapper to all"
},
{
"path": "experimental/argv_fuzzing/argv-fuzz-inl.h",
"chars": 1908,
"preview": "/*\n american fuzzy lop - sample argv fuzzing wrapper\n ------------------------------------------------\n\n Written b"
},
{
"path": "experimental/asan_cgroups/limit_memory.sh",
"chars": 3836,
"preview": "#!/usr/bin/env bash\n#\n# american fuzzy lop - limit memory using cgroups\n# ----------------------------------------------"
},
{
"path": "experimental/bash_shellshock/shellshock-fuzz.diff",
"chars": 1868,
"preview": "This patch shows a very simple way to find post-Shellshock bugs in bash, as\ndiscussed here:\n\n http://lcamtuf.blogspot.c"
},
{
"path": "experimental/canvas_harness/canvas_harness.html",
"chars": 3476,
"preview": "<html>\n<!--\n\n american fuzzy lop - <canvas> harness\n -------------------------------------\n \n Written and maintained "
},
{
"path": "experimental/clang_asm_normalize/as",
"chars": 1610,
"preview": "#!/bin/sh\n#\n# american fuzzy lop - clang assembly normalizer\n# ----------------------------------------------\n#\n# Writte"
},
{
"path": "experimental/crash_triage/triage_crashes.sh",
"chars": 2258,
"preview": "#!/bin/sh\n#\n# american fuzzy lop - crash triage utility\n# -----------------------------------------\n#\n# Written and main"
},
{
"path": "experimental/distributed_fuzzing/sync_script.sh",
"chars": 2292,
"preview": "#!/bin/sh\n#\n# american fuzzy lop - path minimization tool\n# -------------------------------------------\n#\n# Written and "
},
{
"path": "experimental/instrumented_cmp/instrumented_cmp.c",
"chars": 1449,
"preview": "/*\n\n A simple proof-of-concept for instrumented strcpy() or memcpy().\n\n Normally, afl-fuzz will have difficulty ever"
},
{
"path": "experimental/libpng_no_checksum/libpng-nocrc.patch",
"chars": 426,
"preview": "--- pngrutil.c.orig\t2014-06-12 03:35:16.000000000 +0200\n+++ pngrutil.c\t2014-07-01 05:08:31.000000000 +0200\n@@ -268,7 +26"
},
{
"path": "experimental/persistent_demo/persistent_demo.c",
"chars": 2723,
"preview": "/*\n american fuzzy lop - persistent mode example\n --------------------------------------------\n\n Written and maint"
},
{
"path": "experimental/post_library/post_library.so.c",
"chars": 4271,
"preview": "/*\n american fuzzy lop - postprocessor library example\n --------------------------------------------------\n\n Writt"
},
{
"path": "experimental/post_library/post_library_png.so.c",
"chars": 2889,
"preview": "/*\n american fuzzy lop - postprocessor for PNG\n ------------------------------------------\n\n Written and maintaine"
},
{
"path": "hash.h",
"chars": 1706,
"preview": "/*\n american fuzzy lop - hashing function\n -------------------------------------\n\n The hash32() function is a vari"
},
{
"path": "llvm_mode/Makefile",
"chars": 3748,
"preview": "#\n# american fuzzy lop - LLVM instrumentation\n# -----------------------------------------\n#\n# Written by Laszlo Szekeres"
},
{
"path": "llvm_mode/README.llvm",
"chars": 7131,
"preview": "============================================\nFast LLVM-based instrumentation for afl-fuzz\n=============================="
},
{
"path": "llvm_mode/afl-clang-fast.c",
"chars": 7104,
"preview": "/*\n american fuzzy lop - LLVM-mode wrapper for clang\n ------------------------------------------------\n\n Written b"
},
{
"path": "llvm_mode/afl-llvm-pass.so.cc",
"chars": 4762,
"preview": "/*\n american fuzzy lop - LLVM-mode instrumentation pass\n ---------------------------------------------------\n\n Wri"
},
{
"path": "llvm_mode/afl-llvm-rt.o.c",
"chars": 4611,
"preview": "/*\n american fuzzy lop - LLVM instrumentation bootstrap\n ---------------------------------------------------\n\n Wri"
},
{
"path": "qemu_mode/README.qemu",
"chars": 5118,
"preview": "=========================================================\nHigh-performance binary-only instrumentation for afl-fuzz\n===="
},
{
"path": "qemu_mode/build_qemu_support.sh",
"chars": 4319,
"preview": "#!/bin/sh\n#\n# american fuzzy lop - QEMU build script\n# --------------------------------------\n#\n# Written by Andrew Grif"
},
{
"path": "qemu_mode/patches/afl-qemu-cpu-inl.h",
"chars": 7269,
"preview": "/*\n american fuzzy lop - high-performance binary-only instrumentation\n ---------------------------------------------"
},
{
"path": "qemu_mode/patches/cpu-exec.diff",
"chars": 981,
"preview": "--- qemu-2.3.0/cpu-exec.c.orig 2014-12-09 14:45:40.000000000 +0000\n+++ qemu-2.3.0/cpu-exec.c 2015-02-20 22:07:02.96"
},
{
"path": "qemu_mode/patches/elfload.diff",
"chars": 1059,
"preview": "--- qemu-2.3.0/linux-user/elfload.c.orig\t2014-12-09 14:45:42.000000000 +0000\n+++ qemu-2.3.0/linux-user/elfload.c\t2015-01"
},
{
"path": "qemu_mode/patches/syscall.diff",
"chars": 896,
"preview": "--- qemu-2.3.0/linux-user/syscall.c.orig\t2014-12-09 14:45:43.000000000 +0000\n+++ qemu-2.3.0/linux-user/syscall.c\t2015-03"
},
{
"path": "qemu_mode/patches/translate-all.diff",
"chars": 814,
"preview": "--- qemu-2.3.0/translate-all.c.orig 2014-12-09 14:45:46.000000000 +0000\n+++ qemu-2.3.0/translate-all.c 2015-01-28 2"
},
{
"path": "test-instr.c",
"chars": 789,
"preview": "/*\n american fuzzy lop - a trivial program to test the build\n ------------------------------------------------------"
},
{
"path": "testcases/README.testcases",
"chars": 2713,
"preview": "===============================\nAFL test cases and dictionaries\n===============================\n\n (See ../docs/README f"
},
{
"path": "testcases/_extras/gif.dict",
"chars": 298,
"preview": "#\n# AFL dictionary for GIF images\n# -----------------------------\n#\n# Created by Michal Zalewski <lcamtuf@google.com>\n#\n"
},
{
"path": "testcases/_extras/html_tags.dict",
"chars": 3176,
"preview": "#\n# AFL dictionary for HTML parsers (tags only)\n# -------------------------------------------\n#\n# A basic collection of "
},
{
"path": "testcases/_extras/jpeg.dict",
"chars": 458,
"preview": "#\n# AFL dictionary for JPEG images\n# ------------------------------\n#\n# Created by Michal Zalewski <lcamtuf@google.com>\n"
},
{
"path": "testcases/_extras/js.dict",
"chars": 2305,
"preview": "#\n# AFL dictionary for JavaScript\n# -----------------------------\n#\n# Contains basic reserved keywords and syntax buildi"
},
{
"path": "testcases/_extras/pdf.dict",
"chars": 16589,
"preview": "#\n# AFL dictionary for PDF\n# ----------------------\n#\n# This is a pretty big PDF dictionary constructed by Ben by manual"
},
{
"path": "testcases/_extras/png.dict",
"chars": 768,
"preview": "#\n# AFL dictionary for PNG images\n# -----------------------------\n#\n# Just the basic, standard-originating sections; doe"
},
{
"path": "testcases/_extras/sql.dict",
"chars": 8290,
"preview": "#\n# AFL dictionary for SQL\n# ----------------------\n#\n# Modeled based on SQLite documentation, contains some number of S"
},
{
"path": "testcases/_extras/tiff.dict",
"chars": 1081,
"preview": "#\n# AFL dictionary for TIFF images\n# ------------------------------\n#\n# Just the basic, standard-originating sections; d"
},
{
"path": "testcases/_extras/webp.dict",
"chars": 360,
"preview": "#\n# AFL dictionary for WebP images\n# ------------------------------\n#\n# Created by Michal Zalewski <lcamtuf@google.com>\n"
},
{
"path": "testcases/_extras/xml.dict",
"chars": 1619,
"preview": "#\n# AFL dictionary for XML\n# ----------------------\n#\n# Several basic syntax elements and attributes, modeled on libxml2"
},
{
"path": "testcases/archives/common/ar/small_archive.a",
"chars": 260,
"preview": "!<arch>\nlimerick/ 1415337776 500 500 100640 191 `\nThere was a young man from Japan\nWhose limericks nev"
},
{
"path": "testcases/others/js/small_script.js",
"chars": 20,
"preview": "if (1==1) eval('1');"
},
{
"path": "testcases/others/rtf/small_document.rtf",
"chars": 21,
"preview": "{\\rtf1\\pard Test\\par}"
},
{
"path": "testcases/others/sql/simple_queries.sql",
"chars": 75,
"preview": "create table t1(one smallint);\ninsert into t1 values(1);\nselect * from t1;\n"
},
{
"path": "testcases/others/text/hello_world.txt",
"chars": 6,
"preview": "hello\n"
},
{
"path": "testcases/others/xml/small_document.xml",
"chars": 15,
"preview": "<a b=\"c\">d</a>\n"
},
{
"path": "types.h",
"chars": 2100,
"preview": "/*\n american fuzzy lop - type definitions and minor macros\n ------------------------------------------------------\n\n"
}
]
// ... and 40 more files (download for full content)
About this extraction
This page contains the full source code of the jdbirdwell/afl GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 143 files (676.2 KB), approximately 191.9k tokens, and a symbol index with 193 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.