Repository: saferwall/pe Branch: main Commit: 984afb23943d Files: 91 Total size: 765.7 KB Directory structure: gitextract_so1rfj4w/ ├── .editorconfig ├── .gitattributes ├── .github/ │ ├── FUNDING.YML │ └── workflows/ │ └── ci.yaml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── anomaly.go ├── anomaly_test.go ├── arch.go ├── boundimports.go ├── boundimports_test.go ├── cmd/ │ ├── dump.go │ ├── main.go │ ├── size.go │ └── size_test.go ├── debug.go ├── debug_test.go ├── delayimports.go ├── delayimports_test.go ├── dosheader.go ├── dosheader_test.go ├── dotnet.go ├── dotnet_helper.go ├── dotnet_metadata_tables.go ├── dotnet_test.go ├── exception.go ├── exception_test.go ├── exports.go ├── exports_test.go ├── file.go ├── file_test.go ├── globalptr.go ├── go.mod ├── go.sum ├── helper.go ├── helper_test.go ├── iat.go ├── imports.go ├── imports_test.go ├── loadconfig.go ├── loadconfig_test.go ├── log/ │ ├── README.md │ ├── filter.go │ ├── filter_test.go │ ├── global.go │ ├── global_test.go │ ├── helper.go │ ├── helper_test.go │ ├── level.go │ ├── level_test.go │ ├── log.go │ ├── log_test.go │ ├── std.go │ ├── std_test.go │ ├── value.go │ └── value_test.go ├── ntheader.go ├── ntheader_test.go ├── ordlookup.go ├── overlay.go ├── overlay_test.go ├── pe.go ├── reloc.go ├── reloc_test.go ├── resource.go ├── resource_test.go ├── richheader.go ├── richheader_test.go ├── scripts/ │ ├── extract-rsrc-lang.py │ └── ms-lcid.txt ├── section.go ├── section_test.go ├── security.go ├── security_linux_mac.go ├── security_test.go ├── security_windows.go ├── staticcheck.conf ├── symbol.go ├── symbol_test.go ├── test/ │ ├── WdBoot.sys │ ├── acpi.sys │ ├── amdi2c.sys │ ├── amdxata.sys │ └── look ├── tls.go ├── tls_test.go ├── version.go └── version_test.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .editorconfig 
================================================ ; https://editorconfig.org/ root = true [*] insert_final_newline = true charset = utf-8 trim_trailing_whitespace = true indent_style = space indent_size = 2 [{Makefile,go.mod,go.sum,*.go,.gitmodules}] indent_style = tab indent_size = 4 [*.md] indent_size = 4 trim_trailing_whitespace = false eclint_indent_style = unset [Dockerfile] indent_size = 4 ================================================ FILE: .gitattributes ================================================ # Treat all files in the Go repo as binary, with no git magic updating # line endings. This produces predictable results in different environments. # # Windows users contributing to Go will need to use a modern version # of git and editors capable of LF line endings. # # Windows .bat files are known to have multiple bugs when run with LF # endings, and so they are checked in with CRLF endings, with a test # in test/winbatch.go to catch problems. (See golang.org/issue/37791.) # # We'll prevent accidental CRLF line endings from entering the repo # via the git-codereview gofmt checks and tests. # # See golang.org/issue/9281. * -text ================================================ FILE: .github/FUNDING.YML ================================================ github: LordNoteworthy ================================================ FILE: .github/workflows/ci.yaml ================================================ name: Build & Test on: [push] jobs: test: name: Build & Test strategy: fail-fast: false matrix: go-version: [1.20.x, 1.21.x, 1.22.x, 1.23.x, 1.24.x, 1.25.x, 1.26.x] os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - name: Checkout uses: actions/checkout@v6 - name: Install Go uses: actions/setup-go@v6 with: go-version: ${{ matrix.go-version }} - name: Build run: | go env -w GOFLAGS=-mod=mod go build -v ./... 
- name: Extract test data run: | cd test 7z x "*.7z" -pinfected - name: Test With Coverage run: go test -race -coverprofile=coverage -covermode=atomic - name: Upload coverage to Codecov uses: codecov/codecov-action@v2 with: files: ./coverage if: matrix.os == 'windows-latest' && matrix.go-version == '1.23.x' - name: Go vet run: | go vet . if: matrix.os == 'windows-latest' && matrix.go-version == '1.23.x' - name: Staticcheck uses: dominikh/staticcheck-action@v1.3.1 with: version: "2024.1" install-go: false cache-key: ${{ matrix.go }} if: matrix.os == 'windows-latest' && matrix.go-version == '1.23.x' ================================================ FILE: .gitignore ================================================ # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib # Test binary, built with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out coverage # Dependency directories (remove the comment below to include it) vendor/ # Code editors configs .idea/ .vscode/launch.json # Go fuzz artefact crashers/ suppressions/ # Log files *.log test/testdata/ ================================================ FILE: CHANGELOG.md ================================================ # Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [1.4.0] - Unreleased ### Added - Permit more granular control over which data directories are parsed by [rabbitstack](https://github.com/rabbitstack) [#72](https://github.com/saferwall/pe/pull/72). - Support parsing the different `retpoline` types: Imported Address, Indirect Branch and Switchable retpoline [#70](https://github.com/saferwall/pe/pull/70). - Unit tests for load config directory [#70](https://github.com/saferwall/pe/pull/69). 
- Unit tests for TLS directory [#69](https://github.com/saferwall/pe/pull/69). - Unit tests for debug directory [#68](https://github.com/saferwall/pe/pull/68). - Unit tests for resource directory and add functions to prettify resource (sub)languages [#66](https://github.com/saferwall/pe/pull/66). - Annotate PE structures with JSON tags during JSON encoding [#64](https://github.com/saferwall/pe/pull/64), [#65](https://github.com/saferwall/pe/pull/65) and [#67](https://github.com/saferwall/pe/pull/67). - Improve PE dumper to print imports and unit test parsing imports data directory[#63](https://github.com/saferwall/pe/pull/63). - Improve PE dumper to print section headers [#62](https://github.com/saferwall/pe/pull/62). - Improve PE dumper to print PE headers [#61](https://github.com/saferwall/pe/pull/61). - Add `SerialNumber`, `SignatureAlgorithm` and `PubKeyAlgorithm` to the `CertInfo` [#60](https://github.com/saferwall/pe/pull/60). - Option to disable certificate validation [#59](https://github.com/saferwall/pe/pull/59). - Improve PE dumper to print exceptions [#57](https://github.com/saferwall/pe/pull/57). - Unit tests for debug directory [#49](https://github.com/saferwall/pe/pull/49). ### Fixed - Bug while iterating over VolatileInfoRangeTable entries [#70](https://github.com/saferwall/pe/pull/70). - Bug while iterating (additional padding and loop condition) over DVRT relocation block entries [#70](https://github.com/saferwall/pe/pull/70). - Bug while appending (twice) Control Flow Guard IAT entries [#70](https://github.com/saferwall/pe/pull/70). - Bug while parsing `POGO` debug entry types [#68](https://github.com/saferwall/pe/pull/68). - `Authentihash()` for instances w/o fd thanks to [flanfly](https://github.com/flanfly) [#47](https://github.com/saferwall/pe/pull/47). ### Changed - Some fields has been renamed for consistency: - `RichHeader.XorKey` -> `RichHeader.XORKey`. - Any `Rva` substring -> `RVA` and any `Iat` substring -> `IAT`. - And many more. 
- Some fields used internally in imports parsing were changed from a slice of pointers to a simple slice. - Certificate.Content changed from `*pkcs7.PKCS7` to `pkcs7.PKCS7`. - `Section.Entropy` changed from `float64` to `float64*` to distinguish between the case when the section entropy is equal to zero and the case when the entropy is equal to nil - meaning that it was never calculated. - Remove `cobra` dependency from `cmd/pedumper` [#56](https://github.com/saferwall/pe/pull/56). ## [1.3.0] - 2022-08-04 ## Added - Authenticode signature validation in Windows [#43](https://github.com/saferwall/pe/pull/43). - File information structure that helps to identify what parts of the PE file we have, such as `HasImports()` [#42](https://github.com/saferwall/pe/pull/42).. - Calculate Rich header hash thanks to [wanglei-coder](https://github.com/wanglei-coder) [#38](https://github.com/saferwall/pe/pull/38). - PE Overlay thanks to [wanglei-coder](https://github.com/wanglei-coder) [#37](https://github.com/saferwall/pe/pull/37). - Unit tests for DOS header parsing. - Unit tests for CLR directory [#34](https://github.com/saferwall/pe/pull/28). - Unit tests for Rich header [#33](https://github.com/saferwall/pe/pull/33). ## Changed - Do not return an error when parsing a data directory fails [#45](https://github.com/saferwall/pe/pull/45). - Remove pointers from fields in the main `File` structure [#44](https://github.com/saferwall/pe/pull/44). ### Fixed - Fix getting section data repeatedly thanks to [wanglei-coder](https://github.com/wanglei-coder) [#41](https://github.com/saferwall/pe/pull/41). - Fix `adjustSectionAlignment()` thanks to [wanglei-coder](https://github.com/wanglei-coder) [#40](https://github.com/saferwall/pe/pull/40). - Fix authentihash calculation thanks to [wanglei-coder](https://github.com/wanglei-coder) [#38](https://github.com/saferwall/pe/pull/38). 
- Memory leak in `Close()` function that missed a call to `unmap()` thanks to [Mamba24L8](https://github.com/Mamba24L8). ## [1.2.0] - 2022-06-12 ## Added - Unit tests for export directory [#28](https://github.com/saferwall/pe/pull/28). - Add a new option to allow usage of a custom logger [#24](https://github.com/saferwall/pe/pull/24). - Unit tests for delay imports directory [#23](https://github.com/saferwall/pe/pull/23). - Allow access to the raw certificates content [#22](https://github.com/saferwall/pe/pull/22). - Unit tests for security directory [#19](https://github.com/saferwall/pe/pull/19). - Unit tests for bound imports directory [#18](https://github.com/saferwall/pe/pull/18). ## Changed - Make `GetData()` and `GetRVAFromOffset()` and `GetOffsetFromRva()` helper routines public. - Keep parsing in exports directories even when anomalies are found [#26](https://github.com/saferwall/pe/pull/26). ## Fixed - Incorrect check for `skipCertVerification` in security directory. - Null pointer dereference in `GetExportFunctionByRVA()` and out of bounds when calculating `symbolAddress` in export directory [#28](https://github.com/saferwall/pe/pull/28). - Reading unicode string from resource directory `readUnicodeStringAtRVA()` [#26](https://github.com/saferwall/pe/pull/26). - Null pointer dereference in resource directory parsing [#25](https://github.com/saferwall/pe/pull/25). - Imphash calculation [#17](https://github.com/saferwall/pe/pull/17) thanks to [@secDre4mer](https://github.com/secDre4mer). - Null certificate header in security directory [#19](https://github.com/saferwall/pe/pull/19) ## [1.1.0] - 2021-12-20 ### Added - Add .editorconfig and .vscode config. - Add github action CI workflow to test the package. - Add few badges for the README.md to track build status, coverage and code quality. - Introduce a new API to parse a file from a byte array. - Parse .net metadata Module table. - Parse .net metadata stream headers and metadata tables stream header. 
- Add cmd/pedumper to illustrate how to use the library. - Add unit test for relocation, exception, security, symbol, file, nt header, section and helper files. - Add an option `New()` to customize max of relocations entries and COFF symbols to parse. ### Changed - Remove uneeded break statements & lowercase error messages and anomalies. - Make COFF entry in File struct a pointer. - Remove unsafe pointer usage from resource directory. - Do not return an error when COFF symbol table is not found. - License from Apache 2 to MIT. ### Fixed - Probe for invalid Nt Header offset. - Fix authenticode hash calculation. - Compile correctly on 32 bit thnkas to @Max Altgelt. - COFF symbol table `readASCIIStringAtOffset()` out of bounds exception. - Probe for optional header section alignment != 0. - Fix infinite loop in exception unwind code parsing. - Fix last data directory entry is reserved and must be zero. - Safe ready of global pointer register ## [1.0.0] - 2021-03-04 (Initial Release) - Works with PE32/PE32+ file fomat. - Supports Intel x86/AMD64/ARM7ARM7 Thumb/ARM8-64/IA64/CHPE architectures. - MS DOS header. - Rich Header (calculate checksum). - NT Header (file header + optional header). - COFF symbol table and string table. - Sections headers + entropy calculation. - Data directories: - Import Table + ImpHash calculation. - Export Table. - Resource Table. - Exceptions Table. - Security Table + Authentihash calculation. - Relocations Table. - Debug Table (CODEVIEW, POGO, VC FEATURE, REPRO, FPO, EXDLL CHARACTERISTICS debug types). - TLS Table. - Load Config Directory (SEH, GFID, GIAT, Guard LongJumps, CHPE, Dynamic Value Reloc Table, Enclave Configuration, Volatile Metadata tables). - Bound Import Table. - Delay Import Table. - COM Table (CLR Metadata Header, Metadata Table Streams). - Report several anomalies. 
================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at report@saferwall.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2021 Saferwall Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: README.md ================================================ Saferwall logo # Portable Executable Parser [![GoDoc](http://godoc.org/github.com/saferwall/pe?status.svg)](https://pkg.go.dev/github.com/saferwall/pe) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg) [![Report Card](https://goreportcard.com/badge/github.com/saferwall/pe)](https://goreportcard.com/report/github.com/saferwall/pe) [![codecov](https://codecov.io/gh/saferwall/pe/branch/main/graph/badge.svg?token=W7WTOUZLRY)](https://codecov.io/gh/saferwall/pe) ![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/saferwall/pe/ci.yaml?branch=main) **pe** is a go package for parsing the [portable executable](https://docs.microsoft.com/en-us/windows/win32/debug/pe-format) file format. This package was designed with malware analysis in mind, and being resistent to PE malformations. ## Table of content - [Portable Executable Parser](#portable-executable-parser) - [Table of content](#table-of-content) - [Features](#features) - [Installing](#installing) - [Using the library](#using-the-library) - [PE Header](#pe-header) - [Rich Header](#rich-header) - [Iterating over sections](#iterating-over-sections) - [Roadmap](#roadmap) - [Fuzz Testing](#fuzz-testing) - [Projects Using This Library](#projects-using-this-library) - [References](#references) ## Features - Works with PE32/PE32+ file format. - Supports Intel x86/AMD64/ARM7ARM7 Thumb/ARM8-64/IA64/CHPE architectures. - MS DOS header. - Rich Header (calculate checksum and hash). - NT Header (file header + optional header). - COFF symbol table and string table. - Sections headers + entropy calculation. - Data directories - Import Table + ImpHash calculation. - Export Table - Resource Table - Exceptions Table - Security Table + Authentihash calculation. - Relocations Table - Debug Table (CODEVIEW, POGO, VC FEATURE, REPRO, FPO, EXDLL CHARACTERISTICS debug types). 
- TLS Table - Load Config Directory (SEH, GFID, GIAT, Guard LongJumps, CHPE, Dynamic Value Reloc Table, Enclave Configuration, Volatile Metadata tables). - Bound Import Table - Delay Import Table - COM Table (CLR Metadata Header, Metadata Table Streams) - Report several anomalies ## Installing Using this go package is easy. First, use `go get` to install the latest version of the library. This command will install the `pedumper` executable along with the library and its dependencies: go get -u github.com/saferwall/pe Next, include `pe` package in your application: ```go import "github.com/saferwall/pe" ``` ## Using the library ```go package main import ( peparser "github.com/saferwall/pe" ) func main() { filename := "C:\\Binaries\\notepad.exe" pe, err := peparser.New(filename, &peparser.Options{}) if err != nil { log.Fatalf("Error while opening file: %s, reason: %v", filename, err) } err = pe.Parse() if err != nil { log.Fatalf("Error while parsing file: %s, reason: %v", filename, err) } } ``` Start by instantiating a pe object by called the `New()` method, which takes the file path to the file to be parsed and some optional options. Afterwards, a call to the `Parse()` method will give you access to all the different part of the PE format, directly accessible to be used. 
Here is the definition of the struct: ```go type File struct { DOSHeader ImageDOSHeader `json:"dos_header,omitempty"` RichHeader RichHeader `json:"rich_header,omitempty"` NtHeader ImageNtHeader `json:"nt_header,omitempty"` COFF COFF `json:"coff,omitempty"` Sections []Section `json:"sections,omitempty"` Imports []Import `json:"imports,omitempty"` Export Export `json:"export,omitempty"` Debugs []DebugEntry `json:"debugs,omitempty"` Relocations []Relocation `json:"relocations,omitempty"` Resources ResourceDirectory `json:"resources,omitempty"` TLS TLSDirectory `json:"tls,omitempty"` LoadConfig LoadConfig `json:"load_config,omitempty"` Exceptions []Exception `json:"exceptions,omitempty"` Certificates CertificateSection `json:"certificates,omitempty"` DelayImports []DelayImport `json:"delay_imports,omitempty"` BoundImports []BoundImportDescriptorData `json:"bound_imports,omitempty"` GlobalPtr uint32 `json:"global_ptr,omitempty"` CLR CLRData `json:"clr,omitempty"` IAT []IATEntry `json:"iat,omitempty"` Anomalies []string `json:"anomalies,omitempty"` Header []byte data mmap.MMap FileInfo size uint32 OverlayOffset int64 f *os.File opts *Options logger *log.Helper } ``` ### PE Header As mentioned before, all members of the struct are directly (no getters) accessible, additionally, the fields types has been preserved as the spec defines them, that means if you need to show the prettified version of an `int` type, you have to call the corresponding helper function. 
```go fmt.Printf("Magic is: 0x%x\n", pe.DOSHeader.Magic) fmt.Printf("Signature is: 0x%x\n", pe.NtHeader.Signature) fmt.Printf("Machine is: 0x%x, Meaning: %s\n", pe.NtHeader.FileHeader.Machine, pe.NtHeader.FileHeader.Machine.String()) ``` Output: ``` Magic is: 0x5a4d Signature is: 0x4550 Machine is: 0x8664, Meaning: x64 ``` ### Rich Header Example: ```go richHeader, _ := json.Marshal(pe.RichHeader) fmt.Print(prettyPrint(richHeader)) ``` Output: ```json { "XorKey": 2796214951, "CompIDs": [ { "MinorCV": 27412, "ProdID": 257, "Count": 4, "Unmasked": 16870164 }, { "MinorCV": 30729, "ProdID": 147, "Count": 193, "Unmasked": 9664521 }, { "MinorCV": 0, "ProdID": 1, "Count": 1325, "Unmasked": 65536 }, { "MinorCV": 27412, "ProdID": 260, "Count": 9, "Unmasked": 17066772 }, { "MinorCV": 27412, "ProdID": 259, "Count": 3, "Unmasked": 17001236 }, { "MinorCV": 27412, "ProdID": 256, "Count": 1, "Unmasked": 16804628 }, { "MinorCV": 27412, "ProdID": 269, "Count": 209, "Unmasked": 17656596 }, { "MinorCV": 27412, "ProdID": 255, "Count": 1, "Unmasked": 16739092 }, { "MinorCV": 27412, "ProdID": 258, "Count": 1, "Unmasked": 16935700 } ], "DansOffset": 128, "Raw": "47vE9afaqqan2qqmp9qqprOxq6ej2qqmrqI5pmbaqqan2qumit+qprOxrqeu2qqms7Gpp6TaqqazsaqnptqqprOxp6d22qqms7FVpqbaqqazsainptqqplJpY2in2qqm" } ``` ### Iterating over sections ```go for _, sec := range pe.Sections { fmt.Printf("Section Name : %s\n", sec.NameString()) fmt.Printf("Section VirtualSize : %x\n", sec.Header.VirtualSize) fmt.Printf("Section Flags : %x, Meaning: %v\n\n", sec.Header.Characteristics, sec.PrettySectionFlags()) } ``` Output: ``` Section Name : .text Section VirtualSize : 2ea58 Section Flags : 60500060, Meaning: [Align8Bytes Readable Align16Bytes Executable Contains Code Initialized Data Align1Bytes] Section Name : .data Section VirtualSize : 58 Section Flags : c0500040, Meaning: [Readable Initialized Data Writable Align1Bytes Align16Bytes Align8Bytes] Section Name : .rdata Section VirtualSize : 18d0 Section Flags : 
40600040, Meaning: [Align2Bytes Align8Bytes Readable Initialized Data Align32Bytes] ... ``` ## Roadmap - imports MS-styled names demangling - PE: VB5 and VB6 typical structures: project info, DLLCall-imports, referenced modules, object table ## Fuzz Testing To validate the parser we use the [go-fuzz](https://github.com/dvyukov/go-fuzz) and a corpus of known malformed and tricky PE files from [corkami](https://github.com/corkami/pocs/tree/master/PE). ## Projects Using This Library Fibratus [Fibratus](https://github.com/rabbitstack/fibratus) A modern tool for Windows kernel exploration and tracing with a focus on security. ## References - [Peering Inside the PE: A Tour of the Win32 Portable Executable File Format by Matt Pietrek](http://bytepointer.com/resources/pietrek_peering_inside_pe.htm) - [An In-Depth Look into the Win32 Portable Executable File Format - Part 1 by Matt Pietrek](http://www.delphibasics.info/home/delphibasicsarticles/anin-depthlookintothewin32portableexecutablefileformat-part1) - [An In-Depth Look into the Win32 Portable Executable File Format - Part 2 by Matt Pietrek](http://www.delphibasics.info/home/delphibasicsarticles/anin-depthlookintothewin32portableexecutablefileformat-part2) - [Portable Executable File Format](https://blog.kowalczyk.info/articles/pefileformat.html) - [PE Format MSDN spec](https://docs.microsoft.com/en-us/windows/win32/debug/pe-format) - [DotNET format](https://www.ntcore.com/files/dotnetformat.htm) - [BlackHat 2011 - CONSTANT INSECURITY: (PECOFF) Portable Executable FIle Format](https://www.youtube.com/watch?v=uoQL3CE24ls) ================================================ FILE: anomaly.go ================================================ // Copyright 2021 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe import ( "encoding/binary" "time" ) // Anomalies found in a PE var ( // AnoPEHeaderOverlapDOSHeader is reported when the PE headers overlaps with the DOS header. AnoPEHeaderOverlapDOSHeader = "PE header overlaps with DOS header" // AnoPETimeStampNull is reported when the file header timestamp is 0. AnoPETimeStampNull = "file header timestamp set to 0" // AnoPETimeStampFuture is reported when the file header timestamp is more // than one day ahead of the current date timestamp. AnoPETimeStampFuture = "file header timestamp set to 0" // NumberOfSections is reported when number of sections is larger or equal than 10. AnoNumberOfSections10Plus = "number of sections is 10+" // AnoNumberOfSectionsNull is reported when sections count's is 0. AnoNumberOfSectionsNull = "number of sections is 0" // AnoSizeOfOptionalHeaderNull is reported when size of optional header is 0. AnoSizeOfOptionalHeaderNull = "size of optional header is 0" // AnoUncommonSizeOfOptionalHeader32 is reported when size of optional // header for PE32 is larger than 0xE0. AnoUncommonSizeOfOptionalHeader32 = "size of optional header is larger than 0xE0 (PE32)" // AnoUncommonSizeOfOptionalHeader64 is reported when size of optional // header for PE32+ is larger than 0xF0. AnoUncommonSizeOfOptionalHeader64 = "size of optional header is larger than 0xF0 (PE32+)" // AnoAddressOfEntryPointNull is reported when address of entry point is 0. AnoAddressOfEntryPointNull = "address of entry point is 0" // AnoAddressOfEPLessSizeOfHeaders is reported when address of entry point // is smaller than size of headers, the file cannot run under Windows. AnoAddressOfEPLessSizeOfHeaders = "address of entry point is smaller than size of headers, " + "the file cannot run under Windows 8" // AnoImageBaseNull is reported when the image base is null. AnoImageBaseNull = "image base is 0" // AnoDanSMagicOffset is reported when the `DanS` magic offset is different than 0x80. 
AnoDanSMagicOffset = "`DanS` magic offset is different than 0x80" // ErrInvalidFileAlignment is reported when file alignment is larger than // 0x200 and not a power of 2. ErrInvalidFileAlignment = "FileAlignment larger than 0x200 and not a power of 2" // ErrInvalidSectionAlignment is reported when file alignment is lesser // than 0x200 and different from section alignment. ErrInvalidSectionAlignment = "FileAlignment lesser than 0x200 and different from section alignment" // AnoMajorSubsystemVersion is reported when MajorSubsystemVersion has a // value different than the standard 3 --> 6. AnoMajorSubsystemVersion = "MajorSubsystemVersion is outside 3<-->6 boundary" // AnonWin32VersionValue is reported when Win32VersionValue is different than 0 AnonWin32VersionValue = "Win32VersionValue is a reserved field, must be set to zero" // AnoInvalidPEChecksum is reported when the optional header checksum field // is different from what it should normally be. AnoInvalidPEChecksum = "optional header checksum is invalid" // AnoNumberOfRvaAndSizes is reported when NumberOfRvaAndSizes is different than 16. AnoNumberOfRvaAndSizes = "optional header NumberOfRvaAndSizes != 16" // AnoReservedDataDirectoryEntry is reported when the last data directory entry is not zero. AnoReservedDataDirectoryEntry = "last data directory entry is a reserved field, must be set to zero" // AnoCOFFSymbolsCount is reported when the number of COFF symbols is absurdly high. AnoCOFFSymbolsCount = "COFF symbols count is absurdly high" // AnoRelocationEntriesCount is reported when the number of relocation entries is absurdly high. AnoRelocationEntriesCount = "relocation entries count is absurdly high" ) // GetAnomalies reportes anomalies found in a PE binary. // These nomalies does prevent the Windows loader from loading the files but // is an interesting features for malware analysis. 
func (pe *File) GetAnomalies() error { // ******************** Anomalies in File header ************************ // An application for Windows NT typically has the nine predefined sections // named: .text, .bss, .rdata, .data, .rsrc, .edata, .idata, .pdata, and // .debug. Some applications do not need all of these sections, while // others may define still more sections to suit their specific needs. // NumberOfSections can be up to 96 under XP. // NumberOfSections can be up to 65535 under Vista and later. if pe.NtHeader.FileHeader.NumberOfSections >= 10 { pe.Anomalies = append(pe.Anomalies, AnoNumberOfSections10Plus) } // File header timestamp set to 0. if pe.NtHeader.FileHeader.TimeDateStamp == 0 { pe.Anomalies = append(pe.Anomalies, AnoPETimeStampNull) } // File header timestamp set to the future. now := time.Now() future := uint32(now.Add(24 * time.Hour).Unix()) if pe.NtHeader.FileHeader.TimeDateStamp > future { pe.Anomalies = append(pe.Anomalies, AnoPETimeStampFuture) } // NumberOfSections can be null with low alignment PEs // and in this case, the values are just checked but not really used (under XP) if pe.NtHeader.FileHeader.NumberOfSections == 0 { pe.Anomalies = append(pe.Anomalies, AnoNumberOfSectionsNull) } // SizeOfOptionalHeader is not the size of the optional header, but the delta // between the top of the Optional header and the start of the section table. // Thus, it can be null (the section table will overlap the Optional Header, // or can be null when no sections are present) if pe.NtHeader.FileHeader.SizeOfOptionalHeader == 0 { pe.Anomalies = append(pe.Anomalies, AnoSizeOfOptionalHeaderNull) } // SizeOfOptionalHeader can be bigger than the file // (the section table will be in virtual space, full of zeroes), but can't be negative. // Do some check here. oh32 := ImageOptionalHeader32{} oh64 := ImageOptionalHeader64{} // SizeOfOptionalHeader standard value is 0xE0 for PE32. 
if pe.Is32 && pe.NtHeader.FileHeader.SizeOfOptionalHeader > uint16(binary.Size(oh32)) { pe.Anomalies = append(pe.Anomalies, AnoUncommonSizeOfOptionalHeader32) } // SizeOfOptionalHeader standard value is 0xF0 for PE32+. if pe.Is64 && pe.NtHeader.FileHeader.SizeOfOptionalHeader > uint16(binary.Size(oh64)) { pe.Anomalies = append(pe.Anomalies, AnoUncommonSizeOfOptionalHeader64) } // ***************** Anomalies in Optional header ********************* // Under Windows 8, AddressOfEntryPoint is not allowed to be smaller than // SizeOfHeaders, except if it's null. switch pe.Is64 { case true: oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64) case false: oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32) } // Use oh for fields which are common for both structures. oh := oh32 if oh.AddressOfEntryPoint != 0 && oh.AddressOfEntryPoint < oh.SizeOfHeaders { pe.Anomalies = append(pe.Anomalies, AnoAddressOfEPLessSizeOfHeaders) } // AddressOfEntryPoint can be null in DLLs: in this case, // DllMain is just not called. can be null if oh.AddressOfEntryPoint == 0 { pe.Anomalies = append(pe.Anomalies, AnoAddressOfEntryPointNull) } // ImageBase can be null, under XP. // In this case, the binary will be relocated to 10000h if (pe.Is64 && oh64.ImageBase == 0) || (pe.Is32 && oh32.ImageBase == 0) { pe.Anomalies = append(pe.Anomalies, AnoImageBaseNull) } // The msdn states that SizeOfImage must be a multiple of the section // alignment. This is not a requirement though. Adding it as anomaly. // Todo: raise an anomaly when SectionAlignment is NULL ? if oh.SectionAlignment != 0 && oh.SizeOfImage%oh.SectionAlignment != 0 { pe.Anomalies = append(pe.Anomalies, AnoInvalidSizeOfImage) } // For DLLs, MajorSubsystemVersion is ignored until Windows 8. It can have // any value. Under Windows 8, it needs a standard value (3.10 < 6.30). 
if oh.MajorSubsystemVersion < 3 || oh.MajorSubsystemVersion > 6 { pe.Anomalies = append(pe.Anomalies, AnoMajorSubsystemVersion) } // Win32VersionValue officially defined as `reserved` and should be null // if non null, it overrides MajorVersion/MinorVersion/BuildNumber/PlatformId // OperatingSystem Versions values located in the PEB, after loading. if oh.Win32VersionValue != 0 { pe.Anomalies = append(pe.Anomalies, AnonWin32VersionValue) } // Checksums are required for kernel-mode drivers and some system DLLs. // Otherwise, this field can be 0. if pe.Checksum() != oh.CheckSum && oh.CheckSum != 0 { pe.Anomalies = append(pe.Anomalies, AnoInvalidPEChecksum) } // This field contains the number of IMAGE_DATA_DIRECTORY entries. // This field has been 16 since the earliest releases of Windows NT. if (pe.Is64 && oh64.NumberOfRvaAndSizes == 0xA) || (pe.Is32 && oh32.NumberOfRvaAndSizes == 0xA) { pe.Anomalies = append(pe.Anomalies, AnoNumberOfRvaAndSizes) } return nil } // addAnomaly appends the given anomaly to the list of anomalies. func (pe *File) addAnomaly(anomaly string) { if !stringInSlice(anomaly, pe.Anomalies) { pe.Anomalies = append(pe.Anomalies, anomaly) } } ================================================ FILE: anomaly_test.go ================================================ // Copyright 2021 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"testing"
)

// TestGetAnomalies parses known samples from the test corpus and verifies
// that the expected anomaly strings are reported by GetAnomalies.
func TestGetAnomalies(t *testing.T) {

	tests := []struct {
		in  string   // path of the PE sample to parse.
		out []string // anomalies expected to be present (subset match).
	}{
		{
			getAbsoluteFilePath(
				"test/050708404553416d103652a7ca1f887ab81f533a019a0eeff0e6bb460a202cde"),
			[]string{AnoReservedDataDirectoryEntry},
		},
		{
			getAbsoluteFilePath(
				"test/0585495341e0ffaae1734acb78708ff55cd3612d844672d37226ef63d12652d0"),
			[]string{AnoAddressOfEntryPointNull, AnoMajorSubsystemVersion},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			file, err := New(tt.in, &Options{})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}
			err = file.GetAnomalies()
			if err != nil {
				t.Fatalf("GetAnomalies(%s) failed, reason: %v", tt.in, err)
			}
			// Membership check only: a sample may legitimately carry more
			// anomalies than the ones listed in the expectation.
			for _, ano := range tt.out {
				if !stringInSlice(ano, file.Anomalies) {
					t.Errorf("anomaly(%s) not found in anomalies, got: %v",
						ano, file.Anomalies)
				}
			}
		})
	}
}

================================================
FILE: arch.go
================================================
// Copyright 2022 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

// Architecture-specific data. This data directory is not used
// (set to all zeros) for I386, IA64, or AMD64 architecture,
// hence the intentional no-op.
func (pe *File) parseArchitectureDirectory(rva, size uint32) error {
	return nil
}

================================================
FILE: boundimports.go
================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

import (
	"encoding/binary"
)

const (
	// MaxStringLength represents the maximum length of a string to be retrieved
	// from the file. It's there to prevent loading massive amounts of data from
	// memory mapped files. Strings longer than 0x100 bytes should be rather rare.
	MaxStringLength = uint32(0x100)
)

// ImageBoundImportDescriptor represents the IMAGE_BOUND_IMPORT_DESCRIPTOR.
type ImageBoundImportDescriptor struct {
	// TimeDateStamp is just the value from the Exports information of the DLL
	// which is being imported from.
	TimeDateStamp uint32 `json:"time_date_stamp"`
	// Offset of the DLL name counted from the beginning of the BOUND_IMPORT table.
	OffsetModuleName uint16 `json:"offset_module_name"`
	// Number of forwards (IMAGE_BOUND_FORWARDER_REF entries that follow).
	NumberOfModuleForwarderRefs uint16 `json:"number_of_module_forwarder_refs"`
	// Array of zero or more IMAGE_BOUND_FORWARDER_REF follows.
}

// ImageBoundForwardedRef represents the IMAGE_BOUND_FORWARDER_REF.
type ImageBoundForwardedRef struct {
	// Export timestamp of the forwarded-to DLL.
	TimeDateStamp uint32 `json:"time_date_stamp"`
	// Offset of the DLL name counted from the beginning of the BOUND_IMPORT table.
	OffsetModuleName uint16 `json:"offset_module_name"`
	// Reserved, must be zero.
	Reserved uint16 `json:"reserved"`
}

// BoundImportDescriptorData represents the descriptor in addition to forwarded refs.
type BoundImportDescriptorData struct {
	Struct        ImageBoundImportDescriptor `json:"struct"`
	Name          string                     `json:"name"`
	ForwardedRefs []BoundForwardedRefData    `json:"forwarded_refs"`
}

// BoundForwardedRefData represents the struct in addition to the dll name.
type BoundForwardedRefData struct {
	Struct ImageBoundForwardedRef `json:"struct"`
	Name   string                 `json:"name"`
}

// This table is an array of bound import descriptors, each of which describes
// a DLL this image was bound up with at the time of the image creation.
// The descriptors also carry the time stamps of the bindings, and if the
// bindings are up-to-date, the OS loader uses these bindings as a “shortcut”
// for API import. Otherwise, the loader ignores the bindings and resolves the
// imported APIs through the Import tables.
func (pe *File) parseBoundImportDirectory(rva, size uint32) (err error) { var sectionsAfterOffset []uint32 var safetyBoundary uint32 var start = rva for { bndDesc := ImageBoundImportDescriptor{} bndDescSize := uint32(binary.Size(bndDesc)) err = pe.structUnpack(&bndDesc, rva, bndDescSize) // If the RVA is invalid all would blow up. Some EXEs seem to be // specially nasty and have an invalid RVA. if err != nil { return err } // If the structure is all zeros, we reached the end of the list. if bndDesc == (ImageBoundImportDescriptor{}) { break } rva += bndDescSize sectionsAfterOffset = nil fileOffset := pe.GetOffsetFromRva(rva) section := pe.getSectionByRva(rva) if section == nil { safetyBoundary = pe.size - fileOffset for _, section := range pe.Sections { if section.Header.PointerToRawData > fileOffset { sectionsAfterOffset = append( sectionsAfterOffset, section.Header.PointerToRawData) } } if len(sectionsAfterOffset) > 0 { // Find the first section starting at a later offset than that // specified by 'rva' firstSectionAfterOffset := Min(sectionsAfterOffset) section = pe.getSectionByOffset(firstSectionAfterOffset) if section != nil { safetyBoundary = section.Header.PointerToRawData - fileOffset } } } else { sectionLen := uint32(len(section.Data(0, 0, pe))) safetyBoundary = (section.Header.PointerToRawData + sectionLen) - fileOffset } if section == nil { pe.logger.Warnf("RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points to an invalid address: 0x%x", rva) return nil } bndFrwdRef := ImageBoundForwardedRef{} bndFrwdRefSize := uint32(binary.Size(bndFrwdRef)) count := min(uint32(bndDesc.NumberOfModuleForwarderRefs), safetyBoundary/bndFrwdRefSize) forwarderRefs := make([]BoundForwardedRefData, 0) for i := uint32(0); i < count; i++ { err = pe.structUnpack(&bndFrwdRef, rva, bndFrwdRefSize) if err != nil { return err } rva += bndFrwdRefSize offset := start + uint32(bndFrwdRef.OffsetModuleName) DllNameBuff := string(pe.GetStringFromData(0, pe.data[offset:offset+MaxStringLength])) 
DllName := string(DllNameBuff) // OffsetModuleName points to a DLL name. These shouldn't be too long. // Anything longer than a safety length of 128 will be taken to indicate // a corrupt entry and abort the processing of these entries. // Names shorter than 4 characters will be taken as invalid as well. if DllName != "" && (len(DllName) > 256 || !IsPrintable(DllName)) { break } forwarderRefs = append(forwarderRefs, BoundForwardedRefData{ Struct: bndFrwdRef, Name: DllName}) } offset := start + uint32(bndDesc.OffsetModuleName) DllNameBuff := pe.GetStringFromData(0, pe.data[offset:offset+MaxStringLength]) DllName := string(DllNameBuff) if DllName != "" && (len(DllName) > 256 || !IsPrintable(DllName)) { break } pe.BoundImports = append(pe.BoundImports, BoundImportDescriptorData{ Struct: bndDesc, Name: DllName, ForwardedRefs: forwarderRefs}) } if len(pe.BoundImports) > 0 { pe.HasBoundImp = true } return nil } ================================================ FILE: boundimports_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe import ( "reflect" "testing" ) type TestBoundImportEntry struct { entryCount int entryIndex int entry BoundImportDescriptorData errOutOfBounds error } func TestBoundImportDirectory(t *testing.T) { tests := []struct { in string out TestBoundImportEntry }{ { getAbsoluteFilePath("test/mfc40u.dll"), TestBoundImportEntry{ entryCount: 4, entryIndex: 0, entry: BoundImportDescriptorData{ Struct: ImageBoundImportDescriptor{ TimeDateStamp: 0x31CB50F3, OffsetModuleName: 0x38, NumberOfModuleForwarderRefs: 0x1, }, Name: "MSVCRT40.dll", ForwardedRefs: []BoundForwardedRefData{ { Struct: ImageBoundForwardedRef{ TimeDateStamp: 0x3B7DFE0E, OffsetModuleName: 0x45, Reserved: 0x0, }, Name: "msvcrt.DLL", }, }, }, errOutOfBounds: nil, }, }, { // fake bound imports directory getAbsoluteFilePath("test/0044e1870806c048a7558082d4482d1650dcd3ea73152ed2218a554983130721"), TestBoundImportEntry{ errOutOfBounds: ErrOutsideBoundary, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryBoundImport] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryBoundImport] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseBoundImportDirectory(va, size) if err != tt.out.errOutOfBounds { t.Fatalf("parseBoundImportDirectory(%s) failed, reason: %v", tt.in, err) } got := file.BoundImports if len(got) != tt.out.entryCount { t.Errorf("bound imports entry count assertion failed, got %v, want %v", len(got), tt.out.entryCount) } if len(file.BoundImports) > 0 { boundImportEntry := 
file.BoundImports[tt.out.entryIndex] if !reflect.DeepEqual(boundImportEntry, tt.out.entry) { t.Errorf("bound import entry assertion failed, got %v, want %v", boundImportEntry, tt.out.entry) } } }) } } ================================================ FILE: cmd/dump.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package main import ( "bytes" "encoding/binary" "encoding/hex" "encoding/json" "fmt" "os" "path/filepath" "reflect" "strings" "sync" "text/tabwriter" "time" "unicode" "unsafe" peparser "github.com/saferwall/pe" "github.com/saferwall/pe/log" ) var ( wg sync.WaitGroup jobs chan string = make(chan string) ) func loopFilesWorker(cfg config) error { for path := range jobs { files, err := os.ReadDir(path) if err != nil { wg.Done() return err } for _, file := range files { if !file.IsDir() { fullpath := filepath.Join(path, file.Name()) parsePE(fullpath, cfg) } } wg.Done() } return nil } func LoopDirsFiles(path string) error { files, err := os.ReadDir(path) if err != nil { return err } go func() { wg.Add(1) jobs <- path }() for _, file := range files { if file.IsDir() { LoopDirsFiles(filepath.Join(path, file.Name())) } } return nil } func prettyPrint(iface interface{}) string { var prettyJSON bytes.Buffer buff, _ := json.Marshal(iface) err := json.Indent(&prettyJSON, buff, "", "\t") if err != nil { log.Errorf("JSON parse error: %v", err) return string(buff) } return prettyJSON.String() } func humanizeTimestamp(ts uint32) string { unixTimeUTC := time.Unix(int64(ts), 0) return unixTimeUTC.String() } func hexDump(b []byte) { var a [16]byte n := (len(b) + 15) &^ 15 for i := 0; i < n; i++ { if i%16 == 0 { fmt.Printf("%4d", i) } if i%8 == 0 { fmt.Print(" ") } if i < len(b) { fmt.Printf(" %02X", b[i]) } else { fmt.Print(" ") } if i >= len(b) { a[i%16] = ' ' } else if b[i] < 32 || b[i] > 126 { a[i%16] = '.' 
} else { a[i%16] = b[i] } if i%16 == 15 { fmt.Printf(" %s\n", string(a[:])) } } } func hexDumpSize(b []byte, size int) { var a [16]byte // Append null bytes when length of the buffer // is smaller than the requested size. if len(b) < size { temp := make([]byte, size) copy(temp, b) b = temp } n := (size + 15) &^ 15 for i := 0; i < n; i++ { if i%16 == 0 { fmt.Printf("%4d", i) } if i%8 == 0 { fmt.Print(" ") } if i < len(b) { fmt.Printf(" %02X", b[i]) } else { fmt.Print(" ") } if i >= len(b) { a[i%16] = ' ' } else if b[i] < 32 || b[i] > 126 { a[i%16] = '.' } else { a[i%16] = b[i] } if i%16 == 15 { fmt.Printf(" %s\n", string(a[:])) } } } func IntToByteArray(num uint64) []byte { size := int(unsafe.Sizeof(num)) arr := make([]byte, size) for i := 0; i < size; i++ { byt := *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&num)) + uintptr(i))) arr[i] = byt } return arr } func sentenceCase(s string) string { newString := string(s[0]) for i, r := range s[1:] { if unicode.IsLower(r) && unicode.IsLetter(r) { newString += string(r) } else { if i < len(s)-2 { nextChar := rune(s[i+2]) previousChar := rune(s[i]) if unicode.IsLower(previousChar) && unicode.IsLetter(previousChar) { newString += " " + string(r) } else { if unicode.IsLower(nextChar) && unicode.IsLetter(nextChar) { newString += " " + string(r) } else { newString += string(r) } } } } } return newString } func isDirectory(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false } return fileInfo.IsDir() } func parse(filePath string, cfg config) { // filePath points to a file. if !isDirectory(filePath) { parsePE(filePath, cfg) } else { // filePath points to a directory, // walk recursively through all files. 
fileList := []string{} filepath.Walk(filePath, func(path string, f os.FileInfo, err error) error { if !isDirectory(path) { fileList = append(fileList, path) } return nil }) for _, file := range fileList { parsePE(file, cfg) } } } func parsePE(filename string, cfg config) { logger := log.NewStdLogger(os.Stdout) logger = log.NewFilter(logger, log.FilterLevel(log.LevelInfo)) log := log.NewHelper(logger) log.Infof("parsing filename %s", filename) data, _ := os.ReadFile(filename) pe, err := peparser.NewBytes(data, &peparser.Options{ Logger: logger, DisableCertValidation: false, Fast: false, }) if err != nil { log.Infof("Error while opening file: %s, reason: %s", filename, err) return } defer pe.Close() err = pe.Parse() if err != nil { if err != peparser.ErrDOSMagicNotFound { log.Infof("Error while parsing file: %s, reason: %s", filename, err) } return } // Dump all results to disk in JSON format. // f, err := os.Create("out.json") // if err != nil { // return // } // defer f.Close() // f.WriteString(prettyPrint(pe)) if cfg.wantDOSHeader { DOSHeader := pe.DOSHeader magic := string(IntToByteArray(uint64(DOSHeader.Magic))) signature := string(IntToByteArray(uint64(pe.NtHeader.Signature))) w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) fmt.Print("\n\t------[ DOS Header ]------\n\n") fmt.Fprintf(w, "Magic:\t 0x%x (%s)\n", DOSHeader.Magic, magic) fmt.Fprintf(w, "Bytes On Last Page Of File:\t 0x%x\n", DOSHeader.BytesOnLastPageOfFile) fmt.Fprintf(w, "Pages In File:\t 0x%x\n", DOSHeader.PagesInFile) fmt.Fprintf(w, "Relocations:\t 0x%x\n", DOSHeader.Relocations) fmt.Fprintf(w, "Size Of Header:\t 0x%x\n", DOSHeader.SizeOfHeader) fmt.Fprintf(w, "Min Extra Paragraphs Needed:\t 0x%x\n", DOSHeader.MinExtraParagraphsNeeded) fmt.Fprintf(w, "Max Extra Paragraphs Needed:\t 0x%x\n", DOSHeader.MaxExtraParagraphsNeeded) fmt.Fprintf(w, "Initial SS:\t 0x%x\n", DOSHeader.InitialSS) fmt.Fprintf(w, "Initial SP:\t 0x%x\n", DOSHeader.InitialSP) fmt.Fprintf(w, "Checksum:\t 
0x%x\n", DOSHeader.Checksum) fmt.Fprintf(w, "Initial IP:\t 0x%x\n", DOSHeader.InitialIP) fmt.Fprintf(w, "Initial CS:\t 0x%x\n", DOSHeader.InitialCS) fmt.Fprintf(w, "Address Of Relocation Table:\t 0x%x\n", DOSHeader.AddressOfRelocationTable) fmt.Fprintf(w, "Overlay Number:\t 0x%x\n", DOSHeader.OverlayNumber) fmt.Fprintf(w, "OEM Identifier:\t 0x%x\n", DOSHeader.OEMIdentifier) fmt.Fprintf(w, "OEM Information:\t 0x%x\n", DOSHeader.OEMInformation) fmt.Fprintf(w, "Address Of New EXE Header:\t 0x%x (%s)\n", DOSHeader.AddressOfNewEXEHeader, signature) w.Flush() } if cfg.wantRichHeader && pe.FileInfo.HasRichHdr { richHeader := pe.RichHeader fmt.Printf("\nRICH HEADER\n***********\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) fmt.Fprintf(w, "\t0x%x\t XOR Key\n", richHeader.XORKey) fmt.Fprintf(w, "\t0x%x\t DanS offset\n", richHeader.DansOffset) fmt.Fprintf(w, "\t0x%x\t Checksum\n\n", pe.RichHeaderChecksum()) fmt.Fprintln(w, "ProductID\tMinorCV\tCount\tUnmasked\tMeaning\tVSVersion\t") for _, compID := range pe.RichHeader.CompIDs { fmt.Fprintf(w, "0x%x\t0x%x\t0x%x\t0x%x\t%s\t%s\t\n", compID.ProdID, compID.MinorCV, compID.Count, compID.Unmasked, peparser.ProdIDtoStr(compID.ProdID), peparser.ProdIDtoVSversion(compID.ProdID)) } w.Flush() fmt.Print("\n ---Raw header dump---\n") hexDump(richHeader.Raw) } if cfg.wantNTHeader { ntHeader := pe.NtHeader.FileHeader w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) characteristics := strings.Join(ntHeader.Characteristics.String(), " | ") fmt.Print("\n\t------[ File Header ]------\n\n") fmt.Fprintf(w, "Machine:\t 0x%x (%s)\n", int(ntHeader.Machine), ntHeader.Machine.String()) fmt.Fprintf(w, "Number Of Sections:\t 0x%x\n", ntHeader.NumberOfSections) fmt.Fprintf(w, "TimeDateStamp:\t 0x%x (%s)\n", ntHeader.TimeDateStamp, humanizeTimestamp(ntHeader.TimeDateStamp)) fmt.Fprintf(w, "Pointer To Symbol Table:\t 0x%x\n", ntHeader.PointerToSymbolTable) fmt.Fprintf(w, "Number Of Symbols:\t 0x%x\n", 
ntHeader.NumberOfSymbols) fmt.Fprintf(w, "Number Of Symbols:\t 0x%x\n", ntHeader.NumberOfSymbols) fmt.Fprintf(w, "Size Of Optional Header:\t 0x%x\n", ntHeader.SizeOfOptionalHeader) fmt.Fprintf(w, "Characteristics:\t 0x%x (%s)\n", ntHeader.Characteristics, characteristics) w.Flush() fmt.Print("\n\t------[ Optional Header ]------\n\n") if pe.Is64 { oh := pe.NtHeader.OptionalHeader.(peparser.ImageOptionalHeader64) dllCharacteristics := strings.Join(oh.DllCharacteristics.String(), " | ") fmt.Fprintf(w, "Magic:\t 0x%x (%s)\n", oh.Magic, pe.PrettyOptionalHeaderMagic()) fmt.Fprintf(w, "Major Linker Version:\t 0x%x\n", oh.MajorLinkerVersion) fmt.Fprintf(w, "Minor Linker Version:\t 0x%x\n", oh.MinorLinkerVersion) fmt.Fprintf(w, "Size Of Code:\t 0x%x (%s)\n", oh.SizeOfCode, BytesSize(float64(oh.SizeOfCode))) fmt.Fprintf(w, "Size Of Initialized Data:\t 0x%x (%s)\n", oh.SizeOfInitializedData, BytesSize(float64(oh.SizeOfInitializedData))) fmt.Fprintf(w, "Size Of Uninitialized Data:\t 0x%x (%s)\n", oh.SizeOfUninitializedData, BytesSize(float64(oh.SizeOfUninitializedData))) fmt.Fprintf(w, "Address Of Entry Point:\t 0x%x\n", oh.AddressOfEntryPoint) fmt.Fprintf(w, "Base Of Code:\t 0x%x\n", oh.BaseOfCode) fmt.Fprintf(w, "Image Base:\t 0x%x\n", oh.ImageBase) fmt.Fprintf(w, "Section Alignment:\t 0x%x (%s)\n", oh.SectionAlignment, BytesSize(float64(oh.SectionAlignment))) fmt.Fprintf(w, "File Alignment:\t 0x%x (%s)\n", oh.FileAlignment, BytesSize(float64(oh.FileAlignment))) fmt.Fprintf(w, "Major OS Version:\t 0x%x\n", oh.MajorOperatingSystemVersion) fmt.Fprintf(w, "Minor OS Version:\t 0x%x\n", oh.MinorOperatingSystemVersion) fmt.Fprintf(w, "Major Image Version:\t 0x%x\n", oh.MajorImageVersion) fmt.Fprintf(w, "Minor Image Version:\t 0x%x\n", oh.MinorImageVersion) fmt.Fprintf(w, "Major Subsystem Version:\t 0x%x\n", oh.MajorSubsystemVersion) fmt.Fprintf(w, "Minor Subsystem Version:\t 0x%x\n", oh.MinorSubsystemVersion) fmt.Fprintf(w, "Win32 Version Value:\t 0x%x\n", oh.Win32VersionValue) 
fmt.Fprintf(w, "Size Of Image:\t 0x%x (%s)\n", oh.SizeOfImage, BytesSize(float64(oh.SizeOfImage))) fmt.Fprintf(w, "Size Of Headers:\t 0x%x (%s)\n", oh.SizeOfHeaders, BytesSize(float64(oh.SizeOfHeaders))) fmt.Fprintf(w, "Checksum:\t 0x%x\n", oh.CheckSum) fmt.Fprintf(w, "Subsystem:\t 0x%x (%s)\n", uint16(oh.Subsystem), oh.Subsystem.String()) fmt.Fprintf(w, "Dll Characteristics:\t 0x%x (%s)\n", uint16(oh.DllCharacteristics), dllCharacteristics) fmt.Fprintf(w, "Size Of Stack Reserve:\t 0x%x (%s)\n", oh.SizeOfStackReserve, BytesSize(float64(oh.SizeOfStackReserve))) fmt.Fprintf(w, "Size Of Stack Commit:\t 0x%x (%s)\n", oh.SizeOfStackCommit, BytesSize(float64(oh.SizeOfStackCommit))) fmt.Fprintf(w, "Size Of Heap Reserve:\t 0x%x (%s)\n", oh.SizeOfHeapReserve, BytesSize(float64(oh.SizeOfHeapReserve))) fmt.Fprintf(w, "Size Of Heap Commit:\t 0x%x (%s)\n", oh.SizeOfHeapCommit, BytesSize(float64(oh.SizeOfHeapCommit))) fmt.Fprintf(w, "Loader Flags:\t 0x%x\n", oh.LoaderFlags) fmt.Fprintf(w, "Number Of RVA And Sizes:\t 0x%x\n", oh.NumberOfRvaAndSizes) fmt.Fprintf(w, "\n") for entry := peparser.ImageDirectoryEntry(0); entry < peparser.ImageNumberOfDirectoryEntries; entry++ { rva := oh.DataDirectory[entry].VirtualAddress size := oh.DataDirectory[entry].Size fmt.Fprintf(w, "%s Table:\t RVA: 0x%0.8x\t Size:0x%0.8x\t\n", entry.String(), rva, size) } } else { oh := pe.NtHeader.OptionalHeader.(peparser.ImageOptionalHeader32) dllCharacteristics := strings.Join(oh.DllCharacteristics.String(), " | ") fmt.Fprintf(w, "Magic:\t 0x%x (%s)\n", oh.Magic, pe.PrettyOptionalHeaderMagic()) fmt.Fprintf(w, "Major Linker Version:\t 0x%x\n", oh.MajorLinkerVersion) fmt.Fprintf(w, "Minor Linker Version:\t 0x%x\n", oh.MinorLinkerVersion) fmt.Fprintf(w, "Size Of Code:\t 0x%x (%s)\n", oh.SizeOfCode, BytesSize(float64(oh.SizeOfCode))) fmt.Fprintf(w, "Size Of Initialized Data:\t 0x%x (%s)\n", oh.SizeOfInitializedData, BytesSize(float64(oh.SizeOfInitializedData))) fmt.Fprintf(w, "Size Of Uninitialized Data:\t 
0x%x (%s)\n", oh.SizeOfUninitializedData, BytesSize(float64(oh.SizeOfUninitializedData))) fmt.Fprintf(w, "Address Of Entry Point:\t 0x%x\n", oh.AddressOfEntryPoint) fmt.Fprintf(w, "Base Of Code:\t 0x%x\n", oh.BaseOfCode) fmt.Fprintf(w, "Image Base:\t 0x%x\n", oh.ImageBase) fmt.Fprintf(w, "Section Alignment:\t 0x%x (%s)\n", oh.SectionAlignment, BytesSize(float64(oh.SectionAlignment))) fmt.Fprintf(w, "File Alignment:\t 0x%x (%s)\n", oh.FileAlignment, BytesSize(float64(oh.FileAlignment))) fmt.Fprintf(w, "Major OS Version:\t 0x%x\n", oh.MajorOperatingSystemVersion) fmt.Fprintf(w, "Minor OS Version:\t 0x%x\n", oh.MinorOperatingSystemVersion) fmt.Fprintf(w, "Major Image Version:\t 0x%x\n", oh.MajorImageVersion) fmt.Fprintf(w, "Minor Image Version:\t 0x%x\n", oh.MinorImageVersion) fmt.Fprintf(w, "Major Subsystem Version:\t 0x%x\n", oh.MajorSubsystemVersion) fmt.Fprintf(w, "Minor Subsystem Version:\t 0x%x\n", oh.MinorSubsystemVersion) fmt.Fprintf(w, "Win32 Version Value:\t 0x%x\n", oh.Win32VersionValue) fmt.Fprintf(w, "Size Of Image:\t 0x%x (%s)\n", oh.SizeOfImage, BytesSize(float64(oh.SizeOfImage))) fmt.Fprintf(w, "Size Of Headers:\t 0x%x (%s)\n", oh.SizeOfHeaders, BytesSize(float64(oh.SizeOfHeaders))) fmt.Fprintf(w, "Checksum:\t 0x%x\n", oh.CheckSum) fmt.Fprintf(w, "Subsystem:\t 0x%x (%s)\n", uint16(oh.Subsystem), oh.Subsystem.String()) fmt.Fprintf(w, "Dll Characteristics:\t 0x%x (%s)\n", uint16(oh.DllCharacteristics), dllCharacteristics) fmt.Fprintf(w, "Size Of Stack Reserve:\t 0x%x (%s)\n", oh.SizeOfStackReserve, BytesSize(float64(oh.SizeOfStackReserve))) fmt.Fprintf(w, "Size Of Stack Commit:\t 0x%x (%s)\n", oh.SizeOfStackCommit, BytesSize(float64(oh.SizeOfStackCommit))) fmt.Fprintf(w, "Size Of Heap Reserve:\t 0x%x (%s)\n", oh.SizeOfHeapReserve, BytesSize(float64(oh.SizeOfHeapReserve))) fmt.Fprintf(w, "Size Of Heap Commit:\t 0x%x (%s)\n", oh.SizeOfHeapCommit, BytesSize(float64(oh.SizeOfHeapCommit))) fmt.Fprintf(w, "Loader Flags:\t 0x%x\n", oh.LoaderFlags) 
fmt.Fprintf(w, "Number Of RVA And Sizes:\t 0x%x\n", oh.NumberOfRvaAndSizes) fmt.Fprintf(w, "\n") for entry := peparser.ImageDirectoryEntry(0); entry < peparser.ImageNumberOfDirectoryEntries; entry++ { rva := oh.DataDirectory[entry].VirtualAddress size := oh.DataDirectory[entry].Size fmt.Fprintf(w, "%s Table:\t RVA: 0x%0.8x\t Size:0x%0.8x\t\n", entry.String(), rva, size) } } w.Flush() } if cfg.wantCOFF && pe.FileInfo.HasCOFF { fmt.Printf("\nCOFF\n****\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) fmt.Fprintln(w, "Name\tValue\tSectionNumber\tType\tStorageClass\tNumberOfAuxSymbols\t") for _, sym := range pe.COFF.SymbolTable { symName, _ := sym.String(pe) fmt.Fprintf(w, "%s\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t\n", symName, sym.Value, sym.SectionNumber, sym.Type, sym.StorageClass, sym.NumberOfAuxSymbols) } w.Flush() } if cfg.wantSections && pe.FileInfo.HasSections { w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) for i, sec := range pe.Sections { hdr := sec.Header fmt.Printf("\n\t------[ Section Header #%d ]------\n\n", i) fmt.Fprintf(w, "Name:\t %v (%s)\n", hdr.Name, sec.String()) fmt.Fprintf(w, "Virtual Size:\t 0x%x (%s)\n", hdr.VirtualSize, BytesSize(float64(hdr.VirtualSize))) fmt.Fprintf(w, "Virtual Address:\t 0x%x\n", hdr.VirtualAddress) fmt.Fprintf(w, "Size Of Raw Data Size:\t 0x%x (%s)\n", hdr.SizeOfRawData, BytesSize(float64(hdr.SizeOfRawData))) fmt.Fprintf(w, "Pointer To Raw Data:\t 0x%x\n", hdr.PointerToRawData) fmt.Fprintf(w, "Pointer To Relocations:\t 0x%x\n", hdr.PointerToRelocations) fmt.Fprintf(w, "Pointer To Line Numbers:\t 0x%x\n", hdr.PointerToLineNumbers) fmt.Fprintf(w, "Number Of Relocations:\t 0x%x\n", hdr.NumberOfRelocations) fmt.Fprintf(w, "Number Of Line Numbers:\t 0x%x\n", hdr.NumberOfLineNumbers) fmt.Fprintf(w, "Characteristics:\t 0x%x (%s)\n", hdr.Characteristics, strings.Join(sec.PrettySectionFlags(), " | ")) fmt.Fprintf(w, "Entropy:\t %f\n", sec.CalculateEntropy(pe)) w.Flush() fmt.Fprintf(w, 
"\n") hexDumpSize(sec.Data(0, hdr.PointerToRawData, pe), 128) } } if cfg.wantImport && pe.FileInfo.HasImport { fmt.Printf("\nIMPORTS\n********\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) for _, imp := range pe.Imports { desc := imp.Descriptor fmt.Printf("\n\t------[ %s ]------\n\n", imp.Name) fmt.Fprintf(w, "Name:\t 0x%x\n", desc.Name) fmt.Fprintf(w, "Original First Thunk:\t 0x%x\n", desc.OriginalFirstThunk) fmt.Fprintf(w, "First Thunk:\t 0x%x\n", desc.FirstThunk) fmt.Fprintf(w, "TimeDateStamp:\t 0x%x (%s\n", desc.TimeDateStamp, humanizeTimestamp(desc.TimeDateStamp)) fmt.Fprintf(w, "Forwarder Chain:\t 0x%x\n", desc.ForwarderChain) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "Name\tThunkRVA\tThunkValue\tOriginalThunkRVA\tOriginalThunkValue\tHint\t") for _, impFunc := range imp.Functions { fmt.Fprintf(w, "%s\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t\n", impFunc.Name, impFunc.ThunkRVA, impFunc.ThunkValue, impFunc.OriginalThunkRVA, impFunc.OriginalThunkValue, impFunc.Hint) } w.Flush() } } if cfg.wantExport && pe.FileInfo.HasExport { fmt.Printf("\nEXPORTS\n********\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) expDir := pe.Export.Struct fmt.Printf("\n\t------[ %s ]------\n\n", pe.Export.Name) fmt.Fprintf(w, "Characteristics:\t 0x%x\n", expDir.Characteristics) fmt.Fprintf(w, "TimeDateStamp:\t 0x%x (%s\n", expDir.TimeDateStamp, humanizeTimestamp(expDir.TimeDateStamp)) fmt.Fprintf(w, "Major Version:\t 0x%x\n", expDir.MajorVersion) fmt.Fprintf(w, "Minor Version:\t 0x%x\n", expDir.MinorVersion) fmt.Fprintf(w, "Name:\t 0x%x\n", expDir.Name) fmt.Fprintf(w, "Base:\t 0x%x\n", expDir.Base) fmt.Fprintf(w, "Number Of Functions:\t 0x%x\n", expDir.NumberOfFunctions) fmt.Fprintf(w, "Number Of Names:\t 0x%x\n", expDir.NumberOfNames) fmt.Fprintf(w, "Address Of Functions:\t 0x%x\n", expDir.AddressOfFunctions) fmt.Fprintf(w, "Address Of Names:\t 0x%x\n", expDir.AddressOfNames) fmt.Fprintf(w, "Address Of Name Ordinals:\t 0x%x\n", 
expDir.AddressOfNameOrdinals) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "Name\tOrdinal\tNameRVA\tFunctionRVA\tForwardedTo\t") for _, exp := range pe.Export.Functions { fmt.Fprintf(w, "%s\t0x%x\t0x%x\t0x%x\t0x%x\t%s\t\n", exp.Name, exp.Ordinal, exp.NameRVA, exp.FunctionRVA, exp.ForwarderRVA, exp.Forwarder) } w.Flush() } if cfg.wantResource && pe.FileInfo.HasResource { var printRsrcDir func(rsrcDir peparser.ResourceDirectory) padding := 0 printRsrcDataEntry := func(entry peparser.ResourceDataEntry) { padding++ w := tabwriter.NewWriter(os.Stdout, 1, 1, padding, ' ', 0) imgRsrcDataEntry := entry.Struct fmt.Fprintf(w, "\n\t\u27A1 Resource Data Entry\n\t") fmt.Fprintf(w, "|- Offset To Data: 0x%x\n\t", imgRsrcDataEntry.OffsetToData) fmt.Fprintf(w, "|- Size: 0x%x\n\t", imgRsrcDataEntry.Size) fmt.Fprintf(w, "|- Code Page: 0x%x\n\t", imgRsrcDataEntry.CodePage) fmt.Fprintf(w, "|- Reserved: 0x%x\n\t", imgRsrcDataEntry.Reserved) fmt.Fprintf(w, "|- Language: %d (%s)\n\t", entry.Lang, entry.Lang.String()) fmt.Fprintf(w, "|- Sub-language: %s\n\t", peparser.PrettyResourceLang(entry.Lang, int(entry.SubLang))) w.Flush() padding-- } printRsrcDir = func(rsrcDir peparser.ResourceDirectory) { padding++ w := tabwriter.NewWriter(os.Stdout, 1, 1, padding, ' ', 0) imgRsrcDir := rsrcDir.Struct fmt.Fprintf(w, "\n\t\u27A1 Resource Directory\n\t") fmt.Fprintf(w, "|- Characteristics: 0x%x\n\t", imgRsrcDir.Characteristics) fmt.Fprintf(w, "|- TimeDateStamp: 0x%x\n\t", imgRsrcDir.TimeDateStamp) fmt.Fprintf(w, "|- Major Version: 0x%x\n\t", imgRsrcDir.MajorVersion) fmt.Fprintf(w, "|- Minor Version: 0x%x\n\t", imgRsrcDir.MinorVersion) fmt.Fprintf(w, "|- Number Of Named Entries: 0x%x\n\t", imgRsrcDir.NumberOfNamedEntries) fmt.Fprintf(w, "|- Number Of ID Entries: 0x%x\n\t", imgRsrcDir.NumberOfIDEntries) fmt.Fprintf(w, "|----------------------------------\n\t") padding++ w.Flush() w = tabwriter.NewWriter(os.Stdout, 1, 1, padding, ' ', 0) for i, entry := range rsrcDir.Entries { fmt.Fprintf(w, "\t|- \u27A1 
Resource Directory Entry %d, ID: %d", i+1, entry.ID) // Print the interpretation of a resource ID only in root node. if padding == 2 { if entry.ID <= peparser.RTManifest { fmt.Fprintf(w, " (%s)", peparser.ResourceType(entry.ID).String()) } } fmt.Fprintf(w, "\n\t|- Name: 0x%x\n\t", entry.Struct.Name) if entry.Name != "" { fmt.Fprintf(w, " (%s)", entry.Name) } fmt.Fprintf(w, "|- Offset To Data: 0x%x\t", entry.Struct.OffsetToData) fmt.Fprintf(w, "\n\t|----------------------------------\t") w.Flush() if entry.IsResourceDir { printRsrcDir(entry.Directory) } else { printRsrcDataEntry(entry.Data) } } padding -= 2 } fmt.Printf("\nRESOURCES\n**********\n") printRsrcDir(pe.Resources) versionInfo, err := pe.ParseVersionResources() if err != nil { log.Errorf("failed to parse version resources: %v", err) } else { fmt.Printf("\nVersion Info: %v", prettyPrint(versionInfo)) } } if cfg.wantException && pe.FileInfo.HasException { fmt.Printf("\nEXCEPTIONS\n***********\n") for _, exception := range pe.Exceptions { entry := exception.RuntimeFunction fmt.Printf("\n\u27A1 BeginAddress: 0x%x EndAddress:0x%x UnwindInfoAddress:0x%x\t\n", entry.BeginAddress, entry.EndAddress, entry.UnwindInfoAddress) ui := exception.UnwindInfo handlerFlags := peparser.PrettyUnwindInfoHandlerFlags(ui.Flags) prettyFlags := strings.Join(handlerFlags, ",") fmt.Printf("|- Version: 0x%x\n", ui.Version) fmt.Printf("|- Flags: 0x%x", ui.Flags) if ui.Flags == 0 { fmt.Print(" (None)\n") } else { fmt.Printf(" (%s)\n", prettyFlags) } fmt.Printf("|- Size Of Prolog: 0x%x\n", ui.SizeOfProlog) fmt.Printf("|- Count Of Codes: 0x%x\n", ui.CountOfCodes) fmt.Printf("|- Exception Handler: 0x%x\n", ui.ExceptionHandler) fmt.Print("|- Unwind codes:\n") for _, uc := range ui.UnwindCodes { fmt.Printf("|- * %.2x: %s, %s\n", uc.CodeOffset, uc.UnwindOp.String(), uc.Operand) } } } if cfg.wantCertificate && pe.FileInfo.HasCertificate { fmt.Printf("\nSECURITY\n*********\n") cert := pe.Certificates w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, 
' ', tabwriter.AlignRight) fmt.Fprintln(w, "Length\tRevision\tCertificateType\t") fmt.Fprintf(w, "0x%x\t0x%x\t0x%x\t\n", cert.Header.Length, cert.Header.Revision, cert.Header.CertificateType) w.Flush() fmt.Print("\n ---Raw Certificate dump---\n") hexDump(cert.Raw) for _, cert := range cert.Certificates { fmt.Print("\n---Certificate ---\n\n") fmt.Fprintf(w, "Issuer Name:\t %s\n", cert.Info.Issuer) fmt.Fprintf(w, "Subject Name:\t %s\n", cert.Info.Subject) fmt.Fprintf(w, "Serial Number:\t %x\n", cert.Info.SerialNumber) fmt.Fprintf(w, "Validity From:\t %s to %s\n", cert.Info.NotBefore.String(), cert.Info.NotAfter.String()) fmt.Fprintf(w, "Signature Algorithm:\t %s\n", cert.Info.SignatureAlgorithm.String()) fmt.Fprintf(w, "PublicKey Algorithm:\t %s\n", cert.Info.PublicKeyAlgorithm.String()) fmt.Fprintf(w, "Certificate valid:\t %v\n", cert.Verified) fmt.Fprintf(w, "Signature valid:\t %v\n", cert.SignatureValid) w.Flush() } // Calculate the PE authentihash. pe.Authentihash() } if cfg.wantReloc && pe.FileInfo.HasReloc { fmt.Printf("\nRELOCATIONS\n***********\n") for _, reloc := range pe.Relocations { fmt.Printf("\n\u27A1 Virtual Address: 0x%x | Size Of Block:0x%x | Entries Count:0x%x\t\n", reloc.Data.VirtualAddress, reloc.Data.SizeOfBlock, len(reloc.Entries)) fmt.Print("|- Entries:\n") for _, relocEntry := range reloc.Entries { fmt.Printf("|- Data: 0x%x | Offset: 0x%x | Type:0x%x (%s)\n", relocEntry.Data, relocEntry.Offset, relocEntry.Type, relocEntry.Type.String(pe)) } } } if cfg.wantDebug && pe.FileInfo.HasDebug { fmt.Printf("\nDEBUGS\n*******\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) for _, debug := range pe.Debugs { imgDbgDir := debug.Struct fmt.Fprintf(w, "\n\t------[ %s ]------\n", debug.Type) fmt.Fprintf(w, "Characteristics:\t 0x%x\n", imgDbgDir.Characteristics) fmt.Fprintf(w, "TimeDateStamp:\t 0x%x (%s)\n", imgDbgDir.TimeDateStamp, humanizeTimestamp(imgDbgDir.TimeDateStamp)) fmt.Fprintf(w, "Major Version:\t 0x%x\n", 
imgDbgDir.MajorVersion) fmt.Fprintf(w, "Minor Version:\t 0x%x\n", imgDbgDir.MinorVersion) fmt.Fprintf(w, "Type:\t 0x%x\n", imgDbgDir.Type) fmt.Fprintf(w, "Size Of Data:\t 0x%x (%s)\n", imgDbgDir.SizeOfData, BytesSize(float64(imgDbgDir.SizeOfData))) fmt.Fprintf(w, "Address Of Raw Data:\t 0x%x\n", imgDbgDir.AddressOfRawData) fmt.Fprintf(w, "Pointer To Raw Data:\t 0x%x\n", imgDbgDir.PointerToRawData) fmt.Fprintf(w, "\n") switch imgDbgDir.Type { case peparser.ImageDebugTypeCodeView: debugSignature, err := pe.ReadUint32(imgDbgDir.PointerToRawData) if err != nil { continue } switch debugSignature { case peparser.CVSignatureRSDS: pdb := debug.Info.(peparser.CVInfoPDB70) fmt.Fprintf(w, "CV Signature:\t 0x%x (%s)\n", pdb.CVSignature, pdb.CVSignature.String()) fmt.Fprintf(w, "Signature:\t %s\n", pdb.Signature.String()) fmt.Fprintf(w, "Age:\t 0x%x\n", pdb.Age) fmt.Fprintf(w, "PDB FileName:\t %s\n", pdb.PDBFileName) case peparser.CVSignatureNB10: pdb := debug.Info.(peparser.CVInfoPDB20) fmt.Fprintf(w, "CV Header Signature:\t 0x%x (%s)\n", pdb.CVHeader.Signature, pdb.CVHeader.Signature.String()) fmt.Fprintf(w, "CV Header Offset:\t 0x%x\n", pdb.CVHeader.Offset) fmt.Fprintf(w, "Signature:\t 0x%x (%s)\n", pdb.Signature, humanizeTimestamp(pdb.Signature)) fmt.Fprintf(w, "Age:\t 0x%x\n", pdb.Age) fmt.Fprintf(w, "PDBFileName:\t %s\n", pdb.PDBFileName) } case peparser.ImageDebugTypePOGO: pogo := debug.Info.(peparser.POGO) if len(pogo.Entries) > 0 { fmt.Fprintf(w, "Signature:\t 0x%x (%s)\n\n", pogo.Signature, pogo.Signature.String()) fmt.Fprintln(w, "RVA\tSize\tName\tDescription\t") fmt.Fprintln(w, "---\t----\t----\t-----------\t") for _, pogoEntry := range pogo.Entries { fmt.Fprintf(w, "0x%x\t0x%x\t%s\t%s\t\n", pogoEntry.RVA, pogoEntry.Size, pogoEntry.Name, peparser.SectionAttributeDescription(pogoEntry.Name)) } } case peparser.ImageDebugTypeRepro: repro := debug.Info.(peparser.REPRO) fmt.Fprintf(w, "Hash:\t %x\n", repro.Hash) fmt.Fprintf(w, "Size:\t 0x%x (%s)\n", repro.Size, 
BytesSize(float64(repro.Size))) case peparser.ImageDebugTypeExDllCharacteristics: exDllCharacteristics := debug.Info.(peparser.DllCharacteristicsExType) fmt.Fprintf(w, "Value:\t %d (%s)\n", exDllCharacteristics, exDllCharacteristics.String()) case peparser.ImageDebugTypeVCFeature: VCFeature := debug.Info.(peparser.VCFeature) fmt.Fprintf(w, "Pre VC11:\t 0x%x\n", VCFeature.PreVC11) fmt.Fprintf(w, "C/C++:\t 0x%x\n", VCFeature.CCpp) fmt.Fprintf(w, "/GS:\t 0x%x\n", VCFeature.Gs) fmt.Fprintf(w, "/sdl:\t 0x%x\n", VCFeature.Sdl) fmt.Fprintf(w, "GuardN:\t 0x%x\n", VCFeature.GuardN) case peparser.ImageDebugTypeFPO: fpo := debug.Info.([]peparser.FPOData) if len(fpo) > 0 { fmt.Fprintln(w, "OffsetStart\tProcSize\tNumLocals\tParamsSize\tPrologLength\tSavedRegsCount\tHasSEH\tUseBP\tReserved\tFrameType\t") fmt.Fprintln(w, "------\t------\t------\t------\t------\t------\t------\t------\t------\t------\t") for _, fpoData := range fpo { fmt.Fprintf(w, "0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t%d (%s)\t\n", fpoData.OffsetStart, fpoData.ProcSize, fpoData.NumLocals, fpoData.ParamsSize, fpoData.PrologLength, fpoData.SavedRegsCount, fpoData.HasSEH, fpoData.UseBP, fpoData.Reserved, fpoData.FrameType, fpoData.FrameType.String()) } } } } w.Flush() } if cfg.wantBoundImp && pe.FileInfo.HasBoundImp { fmt.Printf("\nBOUND IMPORTS\n************\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) for _, bndImp := range pe.BoundImports { fmt.Printf("\n\t------[ %s ]------\n\n", bndImp.Name) fmt.Fprintf(w, "TimeDateStamp:\t 0x%x (%s)\n", bndImp.Struct.TimeDateStamp, humanizeTimestamp(bndImp.Struct.TimeDateStamp)) fmt.Fprintf(w, "Offset Module Name:\t 0x%x\n", bndImp.Struct.OffsetModuleName) fmt.Fprintf(w, "# Module Forwarder Refs:\t 0x%x\n", bndImp.Struct.NumberOfModuleForwarderRefs) fmt.Fprintf(w, "\n") if len(bndImp.ForwardedRefs) > 0 { fmt.Fprintln(w, "Name\tTimeDateStamp\tOffsetModuleName\tReserved\t") for _, fr := range bndImp.ForwardedRefs { fmt.Fprintf(w, 
"%s\t0x%x\t0x%x\t0x%x\t\n", fr.Name, fr.Struct.TimeDateStamp, fr.Struct.OffsetModuleName, fr.Struct.Reserved) } } w.Flush() } } if cfg.wantTLS && pe.FileInfo.HasTLS { fmt.Printf("\nTLS\n*****\n\n") tls := pe.TLS w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) if pe.Is64 { imgTLSDirectory64 := tls.Struct.(peparser.ImageTLSDirectory64) fmt.Fprintf(w, "Start Address Of Raw Data:\t 0x%x\n", imgTLSDirectory64.StartAddressOfRawData) fmt.Fprintf(w, "End Address Of Raw Data:\t 0x%x\n", imgTLSDirectory64.EndAddressOfRawData) fmt.Fprintf(w, "Address Of Index:\t %x\n", imgTLSDirectory64.AddressOfIndex) fmt.Fprintf(w, "Address Of CallBacks:\t 0x%x\n", imgTLSDirectory64.AddressOfCallBacks) fmt.Fprintf(w, "Size Of Zero Fill:\t 0x%x\n", imgTLSDirectory64.SizeOfZeroFill) fmt.Fprintf(w, "Characteristics:\t 0x%x (%s)\n", imgTLSDirectory64.Characteristics, imgTLSDirectory64.Characteristics.String()) fmt.Fprintf(w, "Callbacks:\n") if tls.Callbacks != nil && len(tls.Callbacks.([]uint64)) > 0 { for _, callback := range tls.Callbacks.([]uint64) { fmt.Fprintf(w, "0x%x\t\n", callback) } } } else { imgTLSDirectory32 := tls.Struct.(peparser.ImageTLSDirectory32) fmt.Fprintf(w, "Start Address Of Raw Data:\t 0x%x\n", imgTLSDirectory32.StartAddressOfRawData) fmt.Fprintf(w, "End Address Of Raw Data:\t 0x%x\n", imgTLSDirectory32.EndAddressOfRawData) fmt.Fprintf(w, "Address Of Index:\t %x\n", imgTLSDirectory32.AddressOfIndex) fmt.Fprintf(w, "Address Of CallBacks:\t 0x%x\n", imgTLSDirectory32.AddressOfCallBacks) fmt.Fprintf(w, "Size Of Zero Fill:\t 0x%x\n", imgTLSDirectory32.SizeOfZeroFill) fmt.Fprintf(w, "Characteristics:\t 0x%x (%s)\n", imgTLSDirectory32.Characteristics, imgTLSDirectory32.Characteristics.String()) fmt.Fprintf(w, "Callbacks:\n") if tls.Callbacks != nil && len(tls.Callbacks.([]uint32)) > 0 { for _, callback := range tls.Callbacks.([]uint32) { fmt.Fprintf(w, "0x%x\t\n", callback) } } } w.Flush() } if cfg.wantLoadCfg && pe.FileInfo.HasLoadCFG { 
fmt.Printf("\nLOAD CONFIG\n************\n\n") loadConfig := pe.LoadConfig w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.TabIndent) v := reflect.ValueOf(loadConfig.Struct) typeOfS := v.Type() imgLoadConfigDirectorySize := v.Field(0).Interface().(uint32) tmp := uint32(0) for i := 0; i < v.NumField(); i++ { // Do not print the fields of the image load config directory structure // that does not belong to it. tmp += uint32(binary.Size((v.Field(i).Interface()))) if tmp > imgLoadConfigDirectorySize { break } fmt.Fprintf(w, " %s\t : 0x%v\n", sentenceCase(typeOfS.Field(i).Name), v.Field(i).Interface()) } w.Flush() } if cfg.wantDelayImp && pe.FileInfo.HasDelayImp { fmt.Printf("\nDELAY IMPORTS\n**************\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) for _, imp := range pe.DelayImports { desc := imp.Descriptor fmt.Printf("\n\t------[ %s ]------\n\n", imp.Name) fmt.Fprintf(w, "Attributes:\t 0x%x\n", desc.Attributes) fmt.Fprintf(w, "Name:\t 0x%x\n", desc.Name) fmt.Fprintf(w, "Module Handle RVA:\t 0x%x\n", desc.ModuleHandleRVA) fmt.Fprintf(w, "Import Address Table RVA:\t 0x%x\n", desc.ImportAddressTableRVA) fmt.Fprintf(w, "Import Name Table RVA:\t 0x%x\n", desc.ImportNameTableRVA) fmt.Fprintf(w, "Bound Import Address Table RVA:\t 0x%x\n", desc.BoundImportAddressTableRVA) fmt.Fprintf(w, "Unload Information Table RVA:\t 0x%x\n", desc.UnloadInformationTableRVA) fmt.Fprintf(w, "TimeDateStamp:\t 0x%x (%s)\n", desc.TimeDateStamp, humanizeTimestamp(desc.TimeDateStamp)) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "Name\tThunkRVA\tThunkValue\tOriginalThunkRVA\tOriginalThunkValue\tHint\t") for _, fn := range imp.Functions { fmt.Fprintf(w, "%s\t0x%x\t0x%x\t0x%x\t0x%x\t0x%x\t\n", fn.Name, fn.ThunkRVA, fn.ThunkValue, fn.OriginalThunkRVA, fn.OriginalThunkValue, fn.Hint) } w.Flush() } } if cfg.wantIAT && pe.FileInfo.HasIAT { fmt.Printf("\nIAT\n****\n\n") w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) fmt.Fprintln(w, 
"Index\tRVA\tValue\tMeaning\t") for _, entry := range pe.IAT { fmt.Fprintf(w, "0x%x\t0x%x\t%v\t%s\t\n", entry.Index, entry.Rva, entry.Value, entry.Meaning) } w.Flush() } if cfg.wantCLR && pe.FileInfo.HasCLR { fmt.Printf("\nCLR\n****\n") fmt.Print("\n\t------[ CLR Header ]------\n\n") clr := pe.CLR w := tabwriter.NewWriter(os.Stdout, 1, 1, 3, ' ', tabwriter.AlignRight) clrHdr := clr.CLRHeader flags := strings.Join(clrHdr.Flags.String(), " | ") fmt.Fprintf(w, "Size Of Header:\t 0x%x\n", clrHdr.Cb) fmt.Fprintf(w, "Major Runtime Version:\t 0x%x\n", clrHdr.MajorRuntimeVersion) fmt.Fprintf(w, "Minor Runtime Version:\t 0x%x\n", clrHdr.MinorRuntimeVersion) fmt.Fprintf(w, "MetaData RVA:\t 0x%x\n", clrHdr.MetaData.VirtualAddress) fmt.Fprintf(w, "MetaData Size:\t 0x%x\n", clrHdr.MetaData.Size) fmt.Fprintf(w, "Flags:\t 0x%x (%v)\n", clrHdr.Flags, flags) fmt.Fprintf(w, "EntryPoint RVA or Token:\t 0x%x\n", clrHdr.EntryPointRVAorToken) fmt.Fprintf(w, "Resources RVA:\t 0x%x\n", clrHdr.Resources.VirtualAddress) fmt.Fprintf(w, "Resources Size:\t 0x%x (%s)\n", clrHdr.Resources.Size, BytesSize(float64(clrHdr.Resources.Size))) fmt.Fprintf(w, "Strong Name Signature RVA:\t 0x%x\n", clrHdr.StrongNameSignature.VirtualAddress) fmt.Fprintf(w, "Strong Name Signature Size:\t 0x%x (%s)\n", clrHdr.StrongNameSignature.Size, BytesSize(float64(clrHdr.StrongNameSignature.Size))) fmt.Fprintf(w, "Code Manager Table RVA:\t 0x%x\n", clrHdr.CodeManagerTable.VirtualAddress) fmt.Fprintf(w, "Code Manager Table Size:\t 0x%x (%s)\n", clrHdr.CodeManagerTable.Size, BytesSize(float64(clrHdr.CodeManagerTable.Size))) fmt.Fprintf(w, "VTable Fixups RVA:\t 0x%x\n", clrHdr.VTableFixups.VirtualAddress) fmt.Fprintf(w, "VTable Fixups Size:\t 0x%x (%s)\n", clrHdr.VTableFixups.Size, BytesSize(float64(clrHdr.VTableFixups.Size))) fmt.Fprintf(w, "Export Address Table Jumps RVA:\t 0x%x\n", clrHdr.ExportAddressTableJumps.VirtualAddress) fmt.Fprintf(w, "Export Address Table Jumps Size:\t 0x%x (%s)\n", 
clrHdr.ExportAddressTableJumps.Size, BytesSize(float64(clrHdr.ExportAddressTableJumps.Size))) fmt.Fprintf(w, "Managed Native Header RVA:\t 0x%x\n", clrHdr.ManagedNativeHeader.VirtualAddress) fmt.Fprintf(w, "Managed Native Header Size:\t 0x%x (%s)\n", clrHdr.ManagedNativeHeader.Size, BytesSize(float64(clrHdr.ManagedNativeHeader.Size))) w.Flush() fmt.Print("\n\t------[ MetaData Header ]------\n\n") mdHdr := clr.MetadataHeader fmt.Fprintf(w, "Signature:\t 0x%x (%s)\n", mdHdr.Signature, string(IntToByteArray(uint64(mdHdr.Signature)))) fmt.Fprintf(w, "Major Version:\t 0x%x\n", mdHdr.MajorVersion) fmt.Fprintf(w, "Minor Version:\t 0x%x\n", mdHdr.MinorVersion) fmt.Fprintf(w, "Extra Data:\t 0x%x\n", mdHdr.ExtraData) fmt.Fprintf(w, "Version String Length:\t 0x%x\n", mdHdr.VersionString) fmt.Fprintf(w, "Version String:\t %s\n", mdHdr.Version) fmt.Fprintf(w, "Flags:\t 0x%x\n", mdHdr.Flags) fmt.Fprintf(w, "Streams Count:\t 0x%x\n", mdHdr.Streams) w.Flush() fmt.Print("\n\t------[ MetaData Streams ]------\n\n") for _, sh := range clr.MetadataStreamHeaders { fmt.Fprintf(w, "Stream Name:\t %s\n", sh.Name) fmt.Fprintf(w, "Offset:\t 0x%x\n", sh.Offset) fmt.Fprintf(w, "Size:\t 0x%x (%s)\n", sh.Size, BytesSize(float64(sh.Size))) w.Flush() fmt.Print("\n ---Stream Content---\n") hexDumpSize(clr.MetadataStreams[sh.Name], 128) fmt.Print("\n") } fmt.Print("\n\t------[ MetaData Tables Stream Header ]------\n\n") mdTablesStreamHdr := clr.MetadataTablesStreamHeader fmt.Fprintf(w, "Reserved:\t 0x%x\n", mdTablesStreamHdr.Reserved) fmt.Fprintf(w, "Major Version:\t 0x%x\n", mdTablesStreamHdr.MajorVersion) fmt.Fprintf(w, "Minor Version:\t 0x%x\n", mdTablesStreamHdr.MinorVersion) fmt.Fprintf(w, "Heaps:\t 0x%x\n", mdTablesStreamHdr.Heaps) fmt.Fprintf(w, "RID:\t 0x%x\n", mdTablesStreamHdr.RID) fmt.Fprintf(w, "MaskValid:\t 0x%x\n", mdTablesStreamHdr.MaskValid) fmt.Fprintf(w, "Sorted:\t 0x%x\n", mdTablesStreamHdr.Sorted) w.Flush() fmt.Print("\n\t------[ MetaData Tables ]------\n\n") mdTables := 
clr.MetadataTables for _, mdTable := range mdTables { fmt.Fprintf(w, "Name:\t %s | Items Count:\t 0x%x\n", mdTable.Name, mdTable.CountCols) } w.Flush() for table, modTable := range pe.CLR.MetadataTables { switch table { case peparser.Module: fmt.Print("\n\t[Modules]\n\t---------\n") modTableRows := modTable.Content.([]peparser.ModuleTableRow) for _, modTableRow := range modTableRows { modName := pe.GetStringFromData(modTableRow.Name, pe.CLR.MetadataStreams["#Strings"]) Mvid := pe.GetStringFromData(modTableRow.Mvid, pe.CLR.MetadataStreams["#GUID"]) MvidStr := hex.EncodeToString(Mvid) fmt.Fprintf(w, "Generation:\t 0x%x\n", modTableRow.Generation) fmt.Fprintf(w, "Name:\t 0x%x (%s)\n", modTableRow.Name, string(modName)) fmt.Fprintf(w, "Mvid:\t 0x%x (%s)\n", modTableRow.Mvid, MvidStr) fmt.Fprintf(w, "EncID:\t 0x%x\n", modTableRow.EncID) fmt.Fprintf(w, "EncBaseID:\t 0x%x\n", modTableRow.EncBaseID) } w.Flush() } } } // Get file type. if pe.IsEXE() { log.Debug("File is Exe") } if pe.IsDLL() { log.Debug("File is DLL") } if pe.IsDriver() { log.Debug("File is Driver") } // Calculate the PE checksum. pe.Checksum() fmt.Print("\n") } ================================================ FILE: cmd/main.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package main

import (
	"flag"
	"fmt"
	"os"
	"runtime"
)

// config selects which parts of a PE file the dump command prints.
// Each field mirrors one -dump* command-line switch; when no switch is
// given, main() turns all of them on.
type config struct {
	wantDOSHeader   bool
	wantRichHeader  bool
	wantNTHeader    bool
	wantCOFF        bool
	wantDataDirs    bool
	wantSections    bool
	wantExport      bool
	wantImport      bool
	wantResource    bool
	wantException   bool
	wantCertificate bool
	wantReloc       bool
	wantDebug       bool
	wantTLS         bool
	wantLoadCfg     bool
	wantBoundImp    bool
	wantIAT         bool
	wantDelayImp    bool
	wantCLR         bool
}

// main parses the command line and dispatches to the "dump" or "version"
// sub-commands. "dump" accepts either a single PE file or a directory that
// is walked by background workers.
func main() {
	dumpCmd := flag.NewFlagSet("dump", flag.ExitOnError)
	dumpDOSHdr := dumpCmd.Bool("dosheader", false, "Dump DOS header")
	dumpRichHdr := dumpCmd.Bool("richheader", false, "Dump Rich header")
	dumpNTHdr := dumpCmd.Bool("ntheader", false, "Dump NT header")
	dumpCOFF := dumpCmd.Bool("coff", false, "Dump COFF symbols")
	dumpDirs := dumpCmd.Bool("directories", false, "Dump data directories")
	dumpSections := dumpCmd.Bool("sections", false, "Dump sections")
	dumpExport := dumpCmd.Bool("export", false, "Dump export table")
	dumpImport := dumpCmd.Bool("import", false, "Dump import table")
	dumpResource := dumpCmd.Bool("resource", false, "Dump resource table")
	dumpException := dumpCmd.Bool("exception", false, "Dump exception table")
	dumpCertificate := dumpCmd.Bool("cert", false, "Dump certificate directory")
	dumpReloc := dumpCmd.Bool("reloc", false, "Dump relocation table")
	dumpDebug := dumpCmd.Bool("debug", false, "Dump debug infos")
	dumpTLS := dumpCmd.Bool("tls", false, "Dump TLS")
	dumpLoadCfg := dumpCmd.Bool("loadconfig", false, "Dump load configuration table")
	dumpBoundImport := dumpCmd.Bool("bound", false, "Dump bound import table")
	dumpIAT := dumpCmd.Bool("iat", false, "Dump IAT")
	dumpDelayedImport := dumpCmd.Bool("delay", false, "Dump delay import descriptor")
	dumpCLR := dumpCmd.Bool("clr", false, "Dump CLR")

	dumpCmd.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: pedumper dump [flags] \n\nFlags:\n")
		dumpCmd.PrintDefaults()
	}

	if len(os.Args) < 2 {
		showHelp()
	}

	switch os.Args[1] {
	case "dump":
		// flag.ExitOnError: Parse exits the process on a bad flag,
		// so its error return does not need checking here.
		dumpCmd.Parse(os.Args[2:])
		args := dumpCmd.Args()
		if len(args) == 0 {
			fmt.Fprintf(os.Stderr, "Error: missing file or directory path\n\n")
			dumpCmd.Usage()
			os.Exit(1)
		}
		filePath := args[0]

		// If no flags are specified, dump everything.
		noFlagsSet := true
		dumpCmd.Visit(func(f *flag.Flag) {
			noFlagsSet = false
		})

		cfg := config{
			wantDOSHeader:   *dumpDOSHdr || noFlagsSet,
			wantRichHeader:  *dumpRichHdr || noFlagsSet,
			wantNTHeader:    *dumpNTHdr || noFlagsSet,
			wantCOFF:        *dumpCOFF || noFlagsSet,
			wantDataDirs:    *dumpDirs || noFlagsSet,
			wantSections:    *dumpSections || noFlagsSet,
			wantExport:      *dumpExport || noFlagsSet,
			wantImport:      *dumpImport || noFlagsSet,
			wantResource:    *dumpResource || noFlagsSet,
			wantException:   *dumpException || noFlagsSet,
			wantCertificate: *dumpCertificate || noFlagsSet,
			wantReloc:       *dumpReloc || noFlagsSet,
			wantDebug:       *dumpDebug || noFlagsSet,
			wantTLS:         *dumpTLS || noFlagsSet,
			wantLoadCfg:     *dumpLoadCfg || noFlagsSet,
			wantBoundImp:    *dumpBoundImport || noFlagsSet,
			wantIAT:         *dumpIAT || noFlagsSet,
			wantDelayImp:    *dumpDelayedImport || noFlagsSet,
			wantCLR:         *dumpCLR || noFlagsSet,
		}

		// Start as many workers as you want, default to CPU count - 1.
		// BUG FIX: runtime.GOMAXPROCS(n) returns the *previous* setting,
		// so the original `numWorkers := runtime.GOMAXPROCS(runtime.NumCPU() - 1)`
		// made numWorkers the old GOMAXPROCS value (normally the full CPU
		// count), not NumCPU()-1 as the comment intends. Compute the worker
		// count explicitly and clamp it to at least one worker.
		numWorkers := runtime.NumCPU() - 1
		if numWorkers < 1 {
			numWorkers = 1
		}
		runtime.GOMAXPROCS(numWorkers)
		for w := 1; w <= numWorkers; w++ {
			go loopFilesWorker(cfg)
		}

		if !isDirectory(filePath) {
			// Input path is a single file.
			parsePE(filePath, cfg)
		} else {
			// Input path is a directory: feed the workers and wait.
			LoopDirsFiles(filePath)
			wg.Wait()
		}
	case "version":
		fmt.Println("You are using version 1.6.0")
	default:
		showHelp()
	}
}

// showHelp prints the top-level usage banner and exits with a non-zero
// status code.
// NOTE(review): the banner below was reconstructed to multi-line form; the
// exact whitespace inside this raw string was mangled by extraction — verify
// against upstream before release.
func showHelp() {
	fmt.Print(
		`
╔═╗╔═╗ ┌─┐┌─┐┬─┐┌─┐┌─┐┬─┐
╠═╝║╣  ├─┘├─┤├┬┘└─┐├┤ ├┬┘
╩  ╚═╝ ┴  ┴ ┴┴└─└─┘└─┘┴└─

A PE-Parser built for speed and malware-analysis in mind.
Brought to you by Saferwall (c) 2018 MIT

Usage: pedumper [options]

Commands:
	dump [flags]    Parse and dump PE file information
	version         Show version information

Run 'pedumper dump -help' for dump flags.
`) os.Exit(1) } ================================================ FILE: cmd/size.go ================================================ package main import ( "fmt" "strconv" "strings" ) // See: http://en.wikipedia.org/wiki/Binary_prefix const ( // Decimal KB = 1000 MB = 1000 * KB GB = 1000 * MB TB = 1000 * GB PB = 1000 * TB // Binary KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB TiB = 1024 * GiB PiB = 1024 * TiB ) type unitMap map[byte]int64 var ( decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} ) var ( decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} ) func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { i := 0 unitsLimit := len(_map) - 1 for size >= base && i < unitsLimit { size = size / base i++ } return size, _map[i] } // CustomSize returns a human-readable approximation of a size // using custom format. func CustomSize(format string, size float64, base float64, _map []string) string { size, unit := getSizeAndUnit(size, base, _map) return fmt.Sprintf(format, size, unit) } // HumanSizeWithPrecision allows the size to be in any precision, // instead of 4 digit precision used in units.HumanSize. func HumanSizeWithPrecision(size float64, precision int) string { size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) return fmt.Sprintf("%.*g%s", precision, size, unit) } // HumanSize returns a human-readable approximation of a size // capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). func HumanSize(size float64) string { return HumanSizeWithPrecision(size, 4) } // BytesSize returns a human-readable size in bytes, kibibytes, // mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). 
func BytesSize(size float64) string { return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) } // FromHumanSize returns an integer from a human-readable specification of a // size using SI standard (eg. "44kB", "17MB"). func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } // RAMInBytes parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and // returns the number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. func RAMInBytes(size string) (int64, error) { return parseSize(size, binaryMap) } // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string, uMap unitMap) (int64, error) { // TODO: rewrite to use strings.Cut if there's a space // once Go < 1.18 is deprecated. sep := strings.LastIndexAny(sizeStr, "01234567890. ") if sep == -1 { // There should be at least a digit. return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } var num, sfx string if sizeStr[sep] != ' ' { num = sizeStr[:sep+1] sfx = sizeStr[sep+1:] } else { // Omit the space separator. num = sizeStr[:sep] sfx = sizeStr[sep+1:] } size, err := strconv.ParseFloat(num, 64) if err != nil { return -1, err } // Backward compatibility: reject negative sizes. if size < 0 { return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } if len(sfx) == 0 { return int64(size), nil } // Process the suffix. if len(sfx) > 3 { // Too long. goto badSuffix } sfx = strings.ToLower(sfx) // Trivial case: b suffix. if sfx[0] == 'b' { if len(sfx) > 1 { // no extra characters allowed after b. goto badSuffix } return int64(size), nil } // A suffix from the map. if mul, ok := uMap[sfx[0]]; ok { size *= float64(mul) } else { goto badSuffix } // The suffix may have extra "b" or "ib" (e.g. KiB or MB). 
switch { case len(sfx) == 2 && sfx[1] != 'b': goto badSuffix case len(sfx) == 3 && sfx[1:] != "ib": goto badSuffix } return int64(size), nil badSuffix: return -1, fmt.Errorf("invalid suffix: '%s'", sfx) } ================================================ FILE: cmd/size_test.go ================================================ package main import ( "fmt" "reflect" "runtime" "strings" "testing" ) func ExampleBytesSize() { fmt.Println(BytesSize(1024)) fmt.Println(BytesSize(1024 * 1024)) fmt.Println(BytesSize(1048576)) fmt.Println(BytesSize(2 * MiB)) fmt.Println(BytesSize(3.42 * GiB)) fmt.Println(BytesSize(5.372 * TiB)) fmt.Println(BytesSize(2.22 * PiB)) } func ExampleHumanSize() { fmt.Println(HumanSize(1000)) fmt.Println(HumanSize(1024)) fmt.Println(HumanSize(1000000)) fmt.Println(HumanSize(1048576)) fmt.Println(HumanSize(2 * MB)) fmt.Println(HumanSize(float64(3.42 * GB))) fmt.Println(HumanSize(float64(5.372 * TB))) fmt.Println(HumanSize(float64(2.22 * PB))) } func ExampleFromHumanSize() { fmt.Println(FromHumanSize("32")) fmt.Println(FromHumanSize("32b")) fmt.Println(FromHumanSize("32B")) fmt.Println(FromHumanSize("32k")) fmt.Println(FromHumanSize("32K")) fmt.Println(FromHumanSize("32kb")) fmt.Println(FromHumanSize("32Kb")) fmt.Println(FromHumanSize("32Mb")) fmt.Println(FromHumanSize("32Gb")) fmt.Println(FromHumanSize("32Tb")) fmt.Println(FromHumanSize("32Pb")) } func ExampleRAMInBytes() { fmt.Println(RAMInBytes("32")) fmt.Println(RAMInBytes("32b")) fmt.Println(RAMInBytes("32B")) fmt.Println(RAMInBytes("32k")) fmt.Println(RAMInBytes("32K")) fmt.Println(RAMInBytes("32kb")) fmt.Println(RAMInBytes("32Kb")) fmt.Println(RAMInBytes("32Mb")) fmt.Println(RAMInBytes("32Gb")) fmt.Println(RAMInBytes("32Tb")) fmt.Println(RAMInBytes("32Pb")) fmt.Println(RAMInBytes("32PB")) fmt.Println(RAMInBytes("32P")) } func TestBytesSize(t *testing.T) { assertEquals(t, "1KiB", BytesSize(1024)) assertEquals(t, "1MiB", BytesSize(1024*1024)) assertEquals(t, "1MiB", BytesSize(1048576)) 
// TestBytesSize (continued from above), TestHumanSize, TestFromHumanSize,
// TestRAMInBytes, BenchmarkParseSize and the assert helpers below were
// collapsed onto long lines by extraction; their tokens are preserved
// verbatim. NOTE(review): several string-literal arguments in these tests
// carry significant whitespace (e.g. single vs. double spaces) that the
// extraction may have mangled — verify against the upstream test file.
assertEquals(t, "2MiB", BytesSize(2*MiB)) assertEquals(t, "3.42GiB", BytesSize(3.42*GiB)) assertEquals(t, "5.372TiB", BytesSize(5.372*TiB)) assertEquals(t, "2.22PiB", BytesSize(2.22*PiB)) assertEquals(t, "1.049e+06YiB", BytesSize(KiB*KiB*KiB*KiB*KiB*PiB)) } func TestHumanSize(t *testing.T) { assertEquals(t, "1kB", HumanSize(1000)) assertEquals(t, "1.024kB", HumanSize(1024)) assertEquals(t, "1MB", HumanSize(1000000)) assertEquals(t, "1.049MB", HumanSize(1048576)) assertEquals(t, "2MB", HumanSize(2*MB)) assertEquals(t, "3.42GB", HumanSize(float64(3.42*GB))) assertEquals(t, "5.372TB", HumanSize(float64(5.372*TB))) assertEquals(t, "2.22PB", HumanSize(float64(2.22*PB))) assertEquals(t, "1e+04YB", HumanSize(float64(10000000000000*PB))) } func TestFromHumanSize(t *testing.T) { assertSuccessEquals(t, 0, FromHumanSize, "0") assertSuccessEquals(t, 0, FromHumanSize, "0b") assertSuccessEquals(t, 0, FromHumanSize, "0B") assertSuccessEquals(t, 0, FromHumanSize, "0 B") assertSuccessEquals(t, 32, FromHumanSize, "32") assertSuccessEquals(t, 32, FromHumanSize, "32b") assertSuccessEquals(t, 32, FromHumanSize, "32B") assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5kB") assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5 kB") assertSuccessEquals(t, 32, FromHumanSize, "32.5 B") assertSuccessEquals(t, 300, FromHumanSize, "0.3 K") assertSuccessEquals(t, 300, FromHumanSize, ".3kB") assertSuccessEquals(t, 0, FromHumanSize, "0.") assertSuccessEquals(t, 0, FromHumanSize, "0. 
") assertSuccessEquals(t, 0, FromHumanSize, "0.b") assertSuccessEquals(t, 0, FromHumanSize, "0.B") assertSuccessEquals(t, 0, FromHumanSize, "-0") assertSuccessEquals(t, 0, FromHumanSize, "-0b") assertSuccessEquals(t, 0, FromHumanSize, "-0B") assertSuccessEquals(t, 0, FromHumanSize, "-0 b") assertSuccessEquals(t, 0, FromHumanSize, "-0 B") assertSuccessEquals(t, 32, FromHumanSize, "32.") assertSuccessEquals(t, 32, FromHumanSize, "32.b") assertSuccessEquals(t, 32, FromHumanSize, "32.B") assertSuccessEquals(t, 32, FromHumanSize, "32. b") assertSuccessEquals(t, 32, FromHumanSize, "32. B") // We do not tolerate extra leading or trailing spaces // (except for a space after the number and a missing suffix). assertSuccessEquals(t, 0, FromHumanSize, "0 ") assertError(t, FromHumanSize, " 0") assertError(t, FromHumanSize, " 0b") assertError(t, FromHumanSize, " 0B") assertError(t, FromHumanSize, " 0 B") assertError(t, FromHumanSize, "0b ") assertError(t, FromHumanSize, "0B ") assertError(t, FromHumanSize, "0 B ") assertError(t, FromHumanSize, "") assertError(t, FromHumanSize, "hello") assertError(t, FromHumanSize, ".") assertError(t, FromHumanSize, ". ") assertError(t, FromHumanSize, " ") assertError(t, FromHumanSize, " ") assertError(t, FromHumanSize, " .") assertError(t, FromHumanSize, " . 
") assertError(t, FromHumanSize, "-32") assertError(t, FromHumanSize, "-32b") assertError(t, FromHumanSize, "-32B") assertError(t, FromHumanSize, "-32 b") assertError(t, FromHumanSize, "-32 B") assertError(t, FromHumanSize, "32b.") assertError(t, FromHumanSize, "32B.") assertError(t, FromHumanSize, "32 b.") assertError(t, FromHumanSize, "32 B.") assertError(t, FromHumanSize, "32 bb") assertError(t, FromHumanSize, "32 BB") assertError(t, FromHumanSize, "32 b b") assertError(t, FromHumanSize, "32 B B") assertError(t, FromHumanSize, "32 b") assertError(t, FromHumanSize, "32 B") assertError(t, FromHumanSize, " 32 ") assertError(t, FromHumanSize, "32m b") assertError(t, FromHumanSize, "32bm") } func TestRAMInBytes(t *testing.T) { assertSuccessEquals(t, 32, RAMInBytes, "32") assertSuccessEquals(t, 32, RAMInBytes, "32b") assertSuccessEquals(t, 32, RAMInBytes, "32B") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kib") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32KIB") assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") assertSuccessEquals(t, 32, RAMInBytes, "32.3") tmp := 32.3 * MiB assertSuccessEquals(t, int64(tmp), RAMInBytes, "32.3 mb") tmp = 0.3 * MiB assertSuccessEquals(t, int64(tmp), RAMInBytes, "0.3MB") assertError(t, RAMInBytes, "") assertError(t, RAMInBytes, "hello") assertError(t, RAMInBytes, "-32") assertError(t, RAMInBytes, " 32 ") assertError(t, RAMInBytes, "32m b") assertError(t, RAMInBytes, "32bm") } func BenchmarkParseSize(b *testing.B) { for i := 0; i < b.N; i++ { for _, s := range []string{ "", "32", "32b", 
// NOTE(review): the benchmark's input-string list continues below, followed
// by the shared assert helpers and then the head of debug.go.
"32 B", "32k", "32.5 K", "32kb", "32 Kb", "32.8Mb", "32.9Gb", "32.777Tb", "32Pb", "0.3Mb", "-1", } { FromHumanSize(s) RAMInBytes(s) } } } func assertEquals(t *testing.T, expected, actual interface{}) { t.Helper() if expected != actual { t.Errorf("Expected '%v' but got '%v'", expected, actual) } } // func that maps to the parse function signatures as testing abstraction type parseFn func(string) (int64, error) // Define 'String()' for pretty-print func (fn parseFn) String() string { fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() return fnName[strings.LastIndex(fnName, ".")+1:] } func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { t.Helper() res, err := fn(arg) if err != nil || res != expected { t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) } } func assertError(t *testing.T, fn parseFn, arg string) { t.Helper() res, err := fn(arg) if err == nil && res != -1 { t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) } } ================================================ FILE: debug.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" "errors" "fmt" ) // The following values are defined for the Type field of the debug directory entry: const ( // An unknown value that is ignored by all tools. ImageDebugTypeUnknown = 0 // The COFF debug information (line numbers, symbol table, and string table). // This type of debug information is also pointed to by fields in the file headers. ImageDebugTypeCOFF = 1 // The Visual C++ debug information. ImageDebugTypeCodeView = 2 // The frame pointer omission (FPO) information. This information tells the // debugger how to interpret nonstandard stack frames, which use the EBP // register for a purpose other than as a frame pointer.
// NOTE(review): the IMAGE_DEBUG_TYPE_* constant list continues past this chunk.
	ImageDebugTypeFPO = 3
	// The location of DBG file.
	ImageDebugTypeMisc = 4
	// A copy of .pdata section.
	ImageDebugTypeException = 5
	// Reserved.
	ImageDebugTypeFixup = 6
	// The mapping from an RVA in image to an RVA in source image.
	ImageDebugTypeOMAPToSrc = 7
	// The mapping from an RVA in source image to an RVA in image.
	ImageDebugTypeOMAPFromSrc = 8
	// Reserved for Borland.
	ImageDebugTypeBorland = 9
	// Reserved.
	ImageDebugTypeReserved = 10
	// Reserved.
	ImageDebugTypeCLSID = 11
	// Visual C++ features (/GS counts /sdl counts and guardN counts).
	ImageDebugTypeVCFeature = 12
	// Pogo aka PGO aka Profile Guided Optimization.
	ImageDebugTypePOGO = 13
	// Incremental Link Time Code Generation (iLTCG).
	ImageDebugTypeILTCG = 14
	// Intel MPX.
	ImageDebugTypeMPX = 15
	// PE determinism or reproducibility.
	ImageDebugTypeRepro = 16
	// Extended DLL characteristics bits.
	// Note the gap: values 17-19 are not defined here.
	ImageDebugTypeExDllCharacteristics = 20
)

const (
	// CVSignatureRSDS represents the CodeView signature 'SDSR':
	// the ASCII string "RSDS" stored as a little-endian DWORD.
	CVSignatureRSDS = 0x53445352
	// CVSignatureNB10 represents the CodeView signature 'NB10',
	// also stored as a little-endian DWORD ("01BN" byte order).
	CVSignatureNB10 = 0x3031424e
)

// FPO frame types, stored in the 2-bit cbFrame field of an FPO record.
const (
	// FrameFPO indicates a frame of type FPO.
	FrameFPO = 0x0
	// FrameTrap indicates a frame of type Trap.
	FrameTrap = 0x1
	// FrameTSS indicates a frame of type TSS.
	FrameTSS = 0x2
	// FrameNonFPO indicates a frame of type Non-FPO.
	FrameNonFPO = 0x3
)

// DllCharacteristicsExType represents a DLL Characteristics type.
type DllCharacteristicsExType uint32

const (
	// ImageDllCharacteristicsExCETCompat indicates that the image is CET
	// compatible.
	ImageDllCharacteristicsExCETCompat = 0x0001
)

// POGO debug entry sub-type signatures (first DWORD of the entry data).
const (
	// POGOTypePGU represents a signature for an undocumented PGO sub type.
	POGOTypePGU = 0x50475500
	// POGOTypePGI represents a signature for an undocumented PGO sub type.
	POGOTypePGI = 0x50474900
	// POGOTypePGO represents a signature for an undocumented PGO sub type.
	POGOTypePGO = 0x50474F00
	// POGOTypeLTCG represents a signature for an undocumented PGO sub type.
	POGOTypeLTCG = 0x4c544347
)

// ImageDebugDirectoryType represents the type of a debug directory.
type ImageDebugDirectoryType uint32

// ImageDebugDirectory represents the IMAGE_DEBUG_DIRECTORY structure.
// This directory indicates what form of debug information is present
// and where it is. This directory consists of an array of debug directory
// entries whose location and size are indicated in the image optional header.
type ImageDebugDirectory struct {
	// Reserved, must be 0.
	Characteristics uint32 `json:"characteristics"`

	// The time and date that the debug data was created.
	TimeDateStamp uint32 `json:"time_date_stamp"`

	// The major version number of the debug data format.
	MajorVersion uint16 `json:"major_version"`

	// The minor version number of the debug data format.
	MinorVersion uint16 `json:"minor_version"`

	// The format of debugging information. This field enables support of
	// multiple debuggers.
	Type ImageDebugDirectoryType `json:"type"`

	// The size of the debug data (not including the debug directory itself).
	SizeOfData uint32 `json:"size_of_data"`

	// The address of the debug data when loaded, relative to the image base.
	AddressOfRawData uint32 `json:"address_of_raw_data"`

	// The file pointer to the debug data.
	PointerToRawData uint32 `json:"pointer_to_raw_data"`
}

// DebugEntry wraps ImageDebugDirectory to include debug directory type.
type DebugEntry struct {
	// Points to the image debug entry structure.
	Struct ImageDebugDirectory `json:"struct"`

	// Holds specific information about the debug type entry
	// (CVInfoPDB70, CVInfoPDB20, POGO, VCFeature, REPRO, []FPOData,
	// or DllCharacteristicsExType, depending on Struct.Type).
	Info interface{} `json:"info"`

	// Type of the debug entry.
	Type string `json:"type"`
}

// GUID is a 128-bit value consisting of one group of 8 hexadecimal digits,
// followed by three groups of 4 hexadecimal digits each, followed by one
// group of 12 hexadecimal digits.
type GUID struct {
	Data1 uint32  // First 8 hex digits.
	Data2 uint16  // First group of 4 hex digits.
	Data3 uint16  // Second group of 4 hex digits.
	Data4 [8]byte // Third group of 4 hex digits, then the final 12.
}

// CVSignature represents a CodeView signature.
type CVSignature uint32

// CVInfoPDB70 represents the CodeView data block of a PDB 7.0 file.
type CVInfoPDB70 struct {
	// CodeView signature, equal to `RSDS`.
	CVSignature CVSignature `json:"cv_signature"`

	// A unique identifier, which changes with every rebuild of the executable and PDB file.
	Signature GUID `json:"signature"`

	// Ever-incrementing value, which is initially set to 1 and incremented every
	// time when a part of the PDB file is updated without rewriting the whole file.
	Age uint32 `json:"age"`

	// Null-terminated name of the PDB file. It can also contain full or partial
	// path to the file.
	PDBFileName string `json:"pdb_file_name"`
}

// CVHeader represents the CodeView header struct of the PDB 2.0 file.
type CVHeader struct {
	// CodeView signature, equal to `NB10`.
	Signature CVSignature `json:"signature"`

	// CodeView offset. Set to 0, because debug information is stored in a
	// separate file.
	Offset uint32 `json:"offset"`
}

// CVInfoPDB20 represents the CodeView data block of a PDB 2.0 file.
type CVInfoPDB20 struct {
	// Points to the CodeView header structure.
	CVHeader CVHeader `json:"cv_header"`

	// The time when debug information was created (in seconds since 01.01.1970).
	Signature uint32 `json:"signature"`

	// Ever-incrementing value, which is initially set to 1 and incremented every
	// time when a part of the PDB file is updated without rewriting the whole file.
	Age uint32 `json:"age"`

	// Null-terminated name of the PDB file. It can also contain full or partial
	// path to the file.
	PDBFileName string `json:"pdb_file_name"`
}

// FPOFrameType represents the type of a FPO frame.
type FPOFrameType uint8

// FPOData represents the stack frame layout for a function on an x86 computer when
// frame pointer omission (FPO) optimization is used. The structure is used to locate
// the base of the call frame.
type FPOData struct {
	// The offset of the first byte of the function code.
	OffsetStart uint32 `json:"offset_start"`

	// The number of bytes in the function.
	ProcSize uint32 `json:"proc_size"`

	// The number of local variables.
	NumLocals uint32 `json:"num_locals"`

	// The size of the parameters, in DWORDs.
	ParamsSize uint16 `json:"params_size"`

	// The number of bytes in the function prolog code.
	PrologLength uint8 `json:"prolog_length"`

	// The number of registers saved.
	SavedRegsCount uint8 `json:"saved_regs_count"`

	// A variable that indicates whether the function uses structured exception handling.
	HasSEH uint8 `json:"has_seh"`

	// A variable that indicates whether the EBP register has been allocated.
	UseBP uint8 `json:"use_bp"`

	// Reserved for future use.
	Reserved uint8 `json:"reserved"`

	// A variable that indicates the frame type.
	FrameType FPOFrameType `json:"frame_type"`
}

// ImagePGOItem represents the _IMAGE_POGO_INFO structure.
type ImagePGOItem struct {
	// RVA of the section chunk described by this entry.
	RVA uint32 `json:"rva"`
	// Size in bytes of the chunk.
	Size uint32 `json:"size"`
	// NUL-terminated section name (e.g. ".text$mn").
	Name string `json:"name"`
}

// POGOType represents a POGO type.
type POGOType uint32

// POGO structure contains information related to the Profile Guided Optimization.
// PGO is an approach to optimization where the compiler uses profile information
// to make better optimization decisions for the program.
type POGO struct {
	// Signature represents the PGO sub type.
	Signature POGOType       `json:"signature"`
	Entries   []ImagePGOItem `json:"entries"`
}

// VCFeature describes the ImageDebugTypeVCFeature debug entry: Visual C++
// feature counters (/GS counts, /sdl counts and guardN counts).
type VCFeature struct {
	PreVC11 uint32 `json:"pre_vc11"`
	CCpp    uint32 `json:"C/C++"`
	Gs      uint32 `json:"/GS"`
	Sdl     uint32 `json:"/sdl"`
	GuardN  uint32 `json:"guardN"`
}

// REPRO holds the data of an ImageDebugTypeRepro debug entry
// (PE determinism or reproducibility).
type REPRO struct {
	// Length in bytes of the hash that follows.
	Size uint32 `json:"size"`
	// Raw hash bytes read from the entry.
	Hash []byte `json:"hash"`
}

// ImageDebugMisc represents the IMAGE_DEBUG_MISC structure.
type ImageDebugMisc struct {
	// The type of data carried in the `Data` field.
	DataType uint32 `json:"data_type"`

	// The length of this structure in bytes, including the entire Data field
	// and its NUL terminator (rounded to four byte multiple.)
Length uint32 `json:"length"` // The encoding of the Data field. True if data is unicode string. Unicode bool `json:"unicode"` // Reserved. Reserved [3]byte `json:"reserved"` // Actual data. Data string `json:"data"` } // Image files contain an optional debug directory that indicates what form of // debug information is present and where it is. This directory consists of an // array of debug directory entries whose location and size are indicated in the // image optional header. The debug directory can be in a discardable .debug // section (if one exists), or it can be included in any other section in the // image file, or not be in a section at all. func (pe *File) parseDebugDirectory(rva, size uint32) error { debugEntry := DebugEntry{} debugDir := ImageDebugDirectory{} errorMsg := fmt.Sprintf("Invalid debug information. Can't read data at RVA: 0x%x", rva) debugDirSize := uint32(binary.Size(debugDir)) debugDirsCount := size / debugDirSize for i := uint32(0); i < debugDirsCount; i++ { offset := pe.GetOffsetFromRva(rva + debugDirSize*i) err := pe.structUnpack(&debugDir, offset, debugDirSize) if err != nil { return errors.New(errorMsg) } switch debugDir.Type { case ImageDebugTypeCodeView: debugSignature, err := pe.ReadUint32(debugDir.PointerToRawData) if err != nil { continue } if debugSignature == CVSignatureRSDS { // PDB 7.0 pdb := CVInfoPDB70{CVSignature: CVSignatureRSDS} // Extract the GUID. offset := debugDir.PointerToRawData + 4 guidSize := uint32(binary.Size(pdb.Signature)) err = pe.structUnpack(&pdb.Signature, offset, guidSize) if err != nil { continue } // Extract the age. offset += guidSize pdb.Age, err = pe.ReadUint32(offset) if err != nil { continue } offset += 4 // PDB file name. pdbFilenameSize := debugDir.SizeOfData - 24 - 1 // pdbFileName_size can be negative here, as seen in the malware // sample with MD5 hash: 7c297600870d026c014d42596bb9b5fd // Checking for positive size here to ensure proper parsing. 
if pdbFilenameSize > 0 { pdbFilename := make([]byte, pdbFilenameSize) err = pe.structUnpack(&pdbFilename, offset, pdbFilenameSize) if err != nil { continue } pdb.PDBFileName = string(pdbFilename) } // Include these extra information. debugEntry.Info = pdb } else if debugSignature == CVSignatureNB10 { // PDB 2.0. cvHeader := CVHeader{} offset := debugDir.PointerToRawData err = pe.structUnpack(&cvHeader, offset, size) if err != nil { continue } pdb := CVInfoPDB20{CVHeader: cvHeader} // Extract the signature. pdb.Signature, err = pe.ReadUint32(offset + 8) if err != nil { continue } // Extract the age. pdb.Age, err = pe.ReadUint32(offset + 12) if err != nil { continue } offset += 16 pdbFilenameSize := debugDir.SizeOfData - 16 - 1 if pdbFilenameSize > 0 { pdbFilename := make([]byte, pdbFilenameSize) err = pe.structUnpack(&pdbFilename, offset, pdbFilenameSize) if err != nil { continue } pdb.PDBFileName = string(pdbFilename) } // Include these extra information. debugEntry.Info = pdb } case ImageDebugTypePOGO: pogoSignature, err := pe.ReadUint32(debugDir.PointerToRawData) if err != nil { continue } pogo := POGO{} switch pogoSignature { case 0x0, POGOTypePGU, POGOTypePGI, POGOTypePGO, POGOTypeLTCG: // TODO: Some files like 00da1a2a9d9ebf447508bf6550f05f466f8eabb4ed6c4f2a524c0769b2d75bc1 // have a POGO signature of 0x0. To be reverse engineered. pogo.Signature = POGOType(pogoSignature) offset = debugDir.PointerToRawData + 4 c := uint32(0) for c < debugDir.SizeOfData-4 { pogoEntry := ImagePGOItem{} pogoEntry.RVA, err = pe.ReadUint32(offset) if err != nil { break } offset += 4 pogoEntry.Size, err = pe.ReadUint32(offset) if err != nil { break } offset += 4 pogoEntry.Name = string(pe.GetStringFromData(0, pe.data[offset:offset+64])) pogo.Entries = append(pogo.Entries, pogoEntry) offset += uint32(len(pogoEntry.Name)) // Make sure offset is aligned to 4 bytes. 
padding := 4 - (offset % 4) c += 4 + 4 + uint32(len(pogoEntry.Name)) + padding offset += padding } debugEntry.Info = pogo } case ImageDebugTypeVCFeature: vcf := VCFeature{} size := uint32(binary.Size(vcf)) err := pe.structUnpack(&vcf, debugDir.PointerToRawData, size) if err != nil { continue } debugEntry.Info = vcf case ImageDebugTypeRepro: repro := REPRO{} offset := debugDir.PointerToRawData // Extract the size. repro.Size, err = pe.ReadUint32(offset) if err != nil { continue } // Extract the hash. repro.Hash, err = pe.ReadBytesAtOffset(offset+4, repro.Size) if err != nil { continue } debugEntry.Info = repro case ImageDebugTypeFPO: offset := debugDir.PointerToRawData size := uint32(16) fpoEntries := []FPOData{} c := uint32(0) for c < debugDir.SizeOfData { fpo := FPOData{} fpo.OffsetStart, err = pe.ReadUint32(offset) if err != nil { break } fpo.ProcSize, err = pe.ReadUint32(offset + 4) if err != nil { break } fpo.NumLocals, err = pe.ReadUint32(offset + 8) if err != nil { break } fpo.ParamsSize, err = pe.ReadUint16(offset + 12) if err != nil { break } fpo.PrologLength, err = pe.ReadUint8(offset + 14) if err != nil { break } attributes, err := pe.ReadUint16(offset + 15) if err != nil { break } // // UChar cbRegs :3; /* # regs saved */ // UChar fHasSEH:1; /* Structured Exception Handling */ // UChar fUseBP :1; /* EBP has been used */ // UChar reserved:1; // UChar cbFrame:2; /* frame type */ // // The lowest 3 bits fpo.SavedRegsCount = uint8(attributes & 0x7) // The next bit. fpo.HasSEH = uint8(attributes & 0x8 >> 3) // The next bit. fpo.UseBP = uint8(attributes & 0x10 >> 4) // The next bit. fpo.Reserved = uint8(attributes & 0x20 >> 5) // The next 2 bits. 
fpo.FrameType = FPOFrameType(attributes & 0xC0 >> 6) fpoEntries = append(fpoEntries, fpo) c += size offset += 16 } debugEntry.Info = fpoEntries case ImageDebugTypeExDllCharacteristics: exDllChar, err := pe.ReadUint32(debugDir.PointerToRawData) if err != nil { continue } debugEntry.Info = DllCharacteristicsExType(exDllChar) } debugEntry.Struct = debugDir debugEntry.Type = debugDir.Type.String() pe.Debugs = append(pe.Debugs, debugEntry) } if len(pe.Debugs) > 0 { pe.HasDebug = true } return nil } // SectionAttributeDescription maps a section attribute to a friendly name. func SectionAttributeDescription(section string) string { sectionNameMap := map[string]string{ ".00cfg": "CFG Check Functions Pointers", ".bss$00": "Uninit.data in phaseN of Pri7", ".bss$dk00": "PGI: Uninit.data may be not const", ".bss$dk01": "PGI: Uninit.data may be not const", ".bss$pr00": "PGI: Uninit.data only for read", ".bss$pr03": "PGI: Uninit.data only for read", ".bss$zz": "PGO: Dead uninit.data", ".CRT$XCA": "First C++ Initializer", ".CRT$XCZ": "Last C++ Initializer", ".xdata$x": "EH data", ".gfids$y": "CFG Functions table", ".CRT$XCAA": "Startup C++ Initializer", ".CRT$XCC": "Global initializer: init_seg(compiler)", ".CRT$XCL": "Global initializer: init_seg(lib)", ".CRT$XCU": "Global initializer: init_seg(user)", ".CRT$XDA": "First Dynamic TLS Initializer", ".CRT$XDZ": "Last Dynamic TLS Initializer", ".CRT$XIA": "First C Initializer", ".CRT$XIAA": "Startup C Initializer", ".CRT$XIAB": "PGO C Initializer", ".CRT$XIAC": "Post-PGO C Initializer", ".CRT$XIC": "CRT C Initializers", ".CRT$XIYA": "VCCorLib Threading Model Initializer", ".CRT$XIYAA": "XAML Designer Threading Model Override Initializer", ".CRT$XIYB": "VCCorLib Main Initializer", ".CRT$XIZ": "Last C Initializer", ".CRT$XLA": "First Loader TLS Callback", ".CRT$XLC": "CRT TLS Constructor", ".CRT$XLD": "CRT TLS Terminator", ".CRT$XLZ": "Last Loader TLS Callback", ".CRT$XPA": "First Pre-Terminator", ".CRT$XPB": "CRT ConcRT 
Pre-Terminator", ".CRT$XPX": "CRT Pre-Terminators", ".CRT$XPXA": "CRT stdio Pre-Terminator", ".CRT$XPZ": "Last Pre-Terminator", ".CRT$XTA": "First Terminator", ".CRT$XTZ": "Last Terminator", ".CRTMA$XCA": "First Managed C++ Initializer", ".CRTMA$XCZ": "Last Managed C++ Initializer", ".CRTVT$XCA": "First Managed VTable Initializer", ".CRTVT$XCZ": "Last Managed VTable Initializer", ".data$00": "Init.data in phaseN of Pri7", ".data$dk00": "PGI: Init.data may be not const", ".data$dk00$brc": "PGI: Init.data may be not const", ".data$pr00": "PGI: Init.data only for read", ".data$r": "RTTI Type Descriptors", ".data$zz": "PGO: Dead init.data", ".data$zz$brc": "PGO: Dead init.data", ".didat$2": "Delay Import Descriptors", ".didat$3": "Delay Import Final NULL Entry", ".didat$4": "Delay Import INT", ".didat$5": "Delay Import IAT", ".didat$6": "Delay Import Symbol Names", ".didat$7": "Delay Import Bound IAT", ".edata": "Export Table", ".gehcont": "CFG EHCont Table", ".gfids": "CFG Functions Table", ".giats": "CFG IAT Table", ".idata$2": "Import Descriptors", ".idata$3": "Import Final NULL Entry", ".idata$4": "Import Names Table", ".idata$5": "Import Addresses Table", ".idata$6": "Import Symbol and DLL Names", ".pdata": "Procedure data", ".rdata$00": "Readonly data in phaseN of Pri7", ".rdata$00$brc": "Readonly data in phaseN of Pri7", ".rdata$09": "Readonly data in phaseN of Pri7", ".rdata$brc": "BaseRelocation Clustering", ".rdata$r": "RTTI Data", ".rdata$sxdata": "Safe SEH", ".rdata$T": "TLS Header", ".rdata$zETW0": "ETW Metadata Header", ".rdata$zETW1": "ETW Events Metadata", ".rdata$zETW2": "ETW Providers Metadata", ".rdata$zETW9": "ETW Metadata Footer", ".rdata$zz": "PGO: Dead Readonly Data", ".rdata$zz$brc": "PGO: Dead Readonly Data", ".rdata$zzzdbg": "Debug directory data", ".rsrc$01": "Resources Header", ".rsrc$02": "Resources Data", ".rtc$IAA": "First RTC Initializer", ".rtc$IZZ": "Last RTC Initializer", ".rtc$TAA": "First RTC Terminator", ".rtc$TZZ": "Last RTC 
Terminator", ".text$di": "MSVC Dynamic Initializers", ".text$lp00kernel32.dll!20_pri7": "PGO: LoaderPhaseN warm-to-hot code", ".text$lp01kernel32.dll!20_pri7": "PGO: LoaderPhaseN warm-to-hot code", ".text$lp03kernel32.dll!30_clientonly": "PGO: LoaderPhaseN warm-to-hot code", ".text$lp04kernel32.dll!30_clientonly": "PGO: LoaderPhaseN warm-to-hot code", ".text$lp08kernel32.dll!40_serveronly": "PGO: LoaderPhaseN warm-to-hot code", ".text$lp09kernel32.dll!40_serveronly": "PGO: LoaderPhaseN warm-to-hot code", ".text$lp10kernel32.dll!40_serveronly": "PGO: LoaderPhaseN warm-to-hot code", ".text$mn": "Contains EP", ".text$mn$00": "CFG Dispatching", ".text$np": "PGO: __asm or disabled via pragma", ".text$x": "EH Filters", ".text$yd": "MSVC Destructors", ".text$zy": "PGO: Dead Code Blocks", ".text$zz": "PGO: Dead Whole Functions", ".xdata": "Unwind data", } if val, ok := sectionNameMap[section]; ok { return val } return "" } // String returns a string interpretation of the FPO frame type. func (ft FPOFrameType) String() string { frameTypeMap := map[FPOFrameType]string{ FrameFPO: "FPO", FrameTrap: "Trap", FrameTSS: "TSS", FrameNonFPO: "Non FPO", } v, ok := frameTypeMap[ft] if ok { return v } return "?" } // String returns the string representation of a GUID. func (g GUID) String() string { return fmt.Sprintf("{%06X-%04X-%04X-%04X-%X}", g.Data1, g.Data2, g.Data3, g.Data4[0:2], g.Data4[2:]) } // String returns the string representation of a debug entry type. 
func (t ImageDebugDirectoryType) String() string { debugTypeMap := map[ImageDebugDirectoryType]string{ ImageDebugTypeUnknown: "Unknown", ImageDebugTypeCOFF: "COFF", ImageDebugTypeCodeView: "CodeView", ImageDebugTypeFPO: "FPO", ImageDebugTypeMisc: "Misc", ImageDebugTypeException: "Exception", ImageDebugTypeFixup: "Fixup", ImageDebugTypeOMAPToSrc: "OMAP To Src", ImageDebugTypeOMAPFromSrc: "OMAP From Src", ImageDebugTypeBorland: "Borland", ImageDebugTypeReserved: "Reserved", ImageDebugTypeVCFeature: "VC Feature", ImageDebugTypePOGO: "POGO", ImageDebugTypeILTCG: "iLTCG", ImageDebugTypeMPX: "MPX", ImageDebugTypeRepro: "REPRO", ImageDebugTypeExDllCharacteristics: "Ex.DLL Characteristics", } v, ok := debugTypeMap[t] if ok { return v } return "?" } // String returns a string interpretation of a POGO type. func (p POGOType) String() string { pogoTypeMap := map[POGOType]string{ POGOTypePGU: "PGU", POGOTypePGI: "PGI", POGOTypePGO: "PGO", POGOTypeLTCG: "LTCG", } v, ok := pogoTypeMap[p] if ok { return v } return "?" } // String returns a string interpretation of a CodeView signature. func (s CVSignature) String() string { cvSignatureMap := map[CVSignature]string{ CVSignatureRSDS: "RSDS", CVSignatureNB10: "NB10", } v, ok := cvSignatureMap[s] if ok { return v } return "?" } // String returns a string interpretation of Dll Characteristics Ex. func (flag DllCharacteristicsExType) String() string { dllCharacteristicsExTypeMap := map[DllCharacteristicsExType]string{ ImageDllCharacteristicsExCETCompat: "CET Compatible", } v, ok := dllCharacteristicsExTypeMap[flag] if ok { return v } return "?" } ================================================ FILE: debug_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"reflect"
	"testing"
)

// TestDebugIn identifies one debug entry of a test binary.
type TestDebugIn struct {
	filepath string
	index    int // debug entry index
}

// loadDebugs opens and parses the PE file at filepath, then parses its debug
// data directory, failing the test on any error. It centralizes the
// boilerplate that was duplicated in every debug test, and fixes the failure
// messages that wrongly referred to parseExportDirectory.
func loadDebugs(t *testing.T, filepath string) *File {
	t.Helper()

	ops := Options{Fast: true}
	file, err := New(filepath, &ops)
	if err != nil {
		t.Fatalf("New(%s) failed, reason: %v", filepath, err)
	}
	err = file.Parse()
	if err != nil {
		t.Fatalf("Parse(%s) failed, reason: %v", filepath, err)
	}

	// Locate the debug data directory in the 32 or 64-bit optional header.
	var va, size uint32
	if file.Is64 {
		oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
		dirEntry := oh64.DataDirectory[ImageDirectoryEntryDebug]
		va = dirEntry.VirtualAddress
		size = dirEntry.Size
	} else {
		oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
		dirEntry := oh32.DataDirectory[ImageDirectoryEntryDebug]
		va = dirEntry.VirtualAddress
		size = dirEntry.Size
	}

	err = file.parseDebugDirectory(va, size)
	if err != nil {
		t.Fatalf("parseDebugDirectory(%s) failed, reason: %v", filepath, err)
	}
	return file
}

// TestDebugDirectoryCodeView verifies parsing of both CodeView flavors:
// RSDS (PDB 7.0) and NB10 (PDB 2.0).
func TestDebugDirectoryCodeView(t *testing.T) {

	type TestCodeView struct {
		debugEntry DebugEntry
		signature  string
	}

	tests := []struct {
		in  TestDebugIn
		out TestCodeView
	}{
		{
			TestDebugIn{
				index:    0,
				filepath: getAbsoluteFilePath("test/kernel32.dll"),
			},
			TestCodeView{
				debugEntry: DebugEntry{
					Struct: ImageDebugDirectory{
						Characteristics:  0x0,
						TimeDateStamp:    0x38b369c4,
						MajorVersion:     0x0,
						MinorVersion:     0x0,
						Type:             0x2,
						SizeOfData:       0x25,
						AddressOfRawData: 0x932f0,
						PointerToRawData: 0x91cf0,
					},
					Info: CVInfoPDB70{
						CVSignature: 0x53445352,
						Signature: GUID{
							Data1: 0xdbe09e71,
							Data2: 0xb370,
							Data3: 0x9cb7,
							Data4: [8]byte{34, 197, 94, 85, 115, 250, 123, 225},
						},
						Age:         0x1,
						PDBFileName: "kernel32.pdb",
					},
					Type: "CodeView",
				},
				signature: "RSDS",
			},
		},
		{
			TestDebugIn{
				index:    0,
				filepath: getAbsoluteFilePath("test/01008963d32f5cc17b64c31446386ee5b36a7eab6761df87a2989ba9394d8f3d"),
			},
			TestCodeView{
				debugEntry: DebugEntry{
					Struct: ImageDebugDirectory{
						Characteristics:  0x0,
						TimeDateStamp:    0x3b7d84d4,
						MajorVersion:     0x0,
						MinorVersion:     0x0,
						Type:             0x2,
						SizeOfData:       0x1d,
						AddressOfRawData: 0x1cf4,
						PointerToRawData: 0x10f4,
					},
					Info: CVInfoPDB20{
						CVHeader: CVHeader{
							Signature: 0x3031424e,
							Offset:    0x0,
						},
						Signature:   0x3b7d84d4,
						Age:         0x1,
						PDBFileName: "routemon.pdb",
					},
					Type: "CodeView",
				},
				signature: "NB10",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in.filepath, func(t *testing.T) {
			file := loadDebugs(t, tt.in.filepath)

			debugEntry := file.Debugs[tt.in.index]
			if !reflect.DeepEqual(debugEntry, tt.out.debugEntry) {
				t.Fatalf("debug entry assertion failed, got %v, want %v",
					debugEntry, tt.out.debugEntry)
			}

			cvSignature := ""
			switch info := debugEntry.Info.(type) {
			case CVInfoPDB70:
				cvSignature = info.CVSignature.String()
			case CVInfoPDB20:
				cvSignature = info.CVHeader.Signature.String()
			}
			if cvSignature != tt.out.signature {
				t.Fatalf("debug CV signature assertion failed, got %v, want %v",
					cvSignature, tt.out.signature)
			}
		})
	}
}

// TestDebugDirectoryPOGO verifies parsing of a POGO debug entry and its
// per-section items.
func TestDebugDirectoryPOGO(t *testing.T) {

	type TestPOGO struct {
		imgDebugEntry ImageDebugDirectory
		entriesCount  int
		debugType     string
		POGOItemIndex int
		POGOItem      ImagePGOItem
		POGOSignature string
	}

	tests := []struct {
		in  TestDebugIn
		out TestPOGO
	}{
		{
			TestDebugIn{
				index:    1,
				filepath: getAbsoluteFilePath("test/kernel32.dll"),
			},
			TestPOGO{
				imgDebugEntry: ImageDebugDirectory{
					Characteristics:  0x0,
					TimeDateStamp:    0x38b369c4,
					MajorVersion:     0x0,
					MinorVersion:     0x0,
					Type:             0xd,
					SizeOfData:       0x574,
					AddressOfRawData: 0x93318,
					PointerToRawData: 0x91d18,
				},
				debugType:     "POGO",
				entriesCount:  60,
				POGOItemIndex: 0,
				POGOItem: ImagePGOItem{
					RVA:  0x1000,
					Size: 0x280,
					Name: ".text$lp00kernel32.dll!20_pri7",
				},
				POGOSignature: "PGU",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in.filepath, func(t *testing.T) {
			file := loadDebugs(t, tt.in.filepath)

			imgDebugEntry := file.Debugs[tt.in.index].Struct
			if !reflect.DeepEqual(imgDebugEntry, tt.out.imgDebugEntry) {
				t.Fatalf("debug entry assertion failed, got %v, want %v",
					imgDebugEntry, tt.out.imgDebugEntry)
			}

			debugTypeString := file.Debugs[tt.in.index].Type
			if debugTypeString != tt.out.debugType {
				t.Fatalf("debug type assertion failed, got %v, want %v",
					debugTypeString, tt.out.debugType)
			}

			pogo := file.Debugs[tt.in.index].Info.(POGO)
			entriesCount := len(pogo.Entries)
			if entriesCount != tt.out.entriesCount {
				t.Fatalf("debug entry count failed, got %v, want %v",
					entriesCount, tt.out.entriesCount)
			}

			pogoItem := pogo.Entries[tt.out.POGOItemIndex]
			if !reflect.DeepEqual(pogoItem, tt.out.POGOItem) {
				t.Fatalf("debug pogo entry assertion failed, got %v, want %v",
					pogoItem, tt.out.POGOItemIndex)
			}

			pogoItemSignature := pogo.Signature.String()
			if pogoItemSignature != tt.out.POGOSignature {
				t.Fatalf("debug pogo signature string assertion failed, got %v, want %v",
					pogoItemSignature, tt.out.POGOSignature)
			}
		})
	}
}

// TestDebugDirectoryREPRO verifies parsing of a REPRO (determinism) entry.
func TestDebugDirectoryREPRO(t *testing.T) {

	type TestREPRO struct {
		debugType  string
		debugEntry DebugEntry
	}

	tests := []struct {
		in  TestDebugIn
		out TestREPRO
	}{
		{
			TestDebugIn{
				index:    2,
				filepath: getAbsoluteFilePath("test/kernel32.dll"),
			},
			TestREPRO{
				debugEntry: DebugEntry{
					Struct: ImageDebugDirectory{
						Characteristics:  0x0,
						TimeDateStamp:    0x38b369c4,
						MajorVersion:     0x0,
						MinorVersion:     0x0,
						Type:             0x10,
						SizeOfData:       0x24,
						AddressOfRawData: 0x9388c,
						PointerToRawData: 0x9228c,
					},
					Info: REPRO{
						Size: 0x20,
						Hash: []byte{113, 158, 224, 219, 112, 179, 183, 156, 34, 197, 94, 85,
							115, 250, 123, 225, 130, 247, 187, 89, 220, 154, 207, 99, 80, 113,
							179, 171, 196, 105, 179, 56},
					},
					Type: "REPRO",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in.filepath, func(t *testing.T) {
			file := loadDebugs(t, tt.in.filepath)

			debugEntry := file.Debugs[tt.in.index]
			if !reflect.DeepEqual(debugEntry, tt.out.debugEntry) {
				t.Fatalf("debug entry assertion failed, got %v, want %v",
					debugEntry, tt.out.debugEntry)
			}
		})
	}
}

// TestDebugDirectoryExDLLCharacteristics verifies parsing of the extended
// DLL characteristics entry and its string form.
func TestDebugDirectoryExDLLCharacteristics(t *testing.T) {

	type TestExDLLCharacteristics struct {
		debugEntry           DebugEntry
		exDLLCharacteristics string
	}

	tests := []struct {
		in  TestDebugIn
		out TestExDLLCharacteristics
	}{
		{
			TestDebugIn{
				index:    3,
				filepath: getAbsoluteFilePath("test/kernel32.dll"),
			},
			TestExDLLCharacteristics{
				debugEntry: DebugEntry{
					Struct: ImageDebugDirectory{
						Characteristics:  0x0,
						TimeDateStamp:    0x38b369c4,
						MajorVersion:     0x0,
						MinorVersion:     0x0,
						Type:             0x14,
						SizeOfData:       0x4,
						AddressOfRawData: 0x938b0,
						PointerToRawData: 0x922b0,
					},
					Info: DllCharacteristicsExType(0x1),
					Type: "Ex.DLL Characteristics",
				},
				exDLLCharacteristics: "CET Compatible",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in.filepath, func(t *testing.T) {
			file := loadDebugs(t, tt.in.filepath)

			debugEntry := file.Debugs[tt.in.index]
			if !reflect.DeepEqual(debugEntry, tt.out.debugEntry) {
				t.Fatalf("debug entry assertion failed, got %v, want %v",
					debugEntry, tt.out.debugEntry)
			}

			dllCharacteristicsExString := debugEntry.Info.(DllCharacteristicsExType).String()
			if dllCharacteristicsExString != tt.out.exDLLCharacteristics {
				t.Fatalf("debug entry DllCharacteristicsEx string assertion failed, got %v, want %v",
					dllCharacteristicsExString, tt.out.exDLLCharacteristics)
			}
		})
	}
}

// TestDebugDirectoryVCFeature verifies parsing of a VC Feature entry.
func TestDebugDirectoryVCFeature(t *testing.T) {

	type TestVCFeature struct {
		debugEntry DebugEntry
	}

	tests := []struct {
		in  TestDebugIn
		out TestVCFeature
	}{
		{
			TestDebugIn{
				index:    1,
				filepath: getAbsoluteFilePath("test/00da1a2a9d9ebf447508bf6550f05f466f8eabb4ed6c4f2a524c0769b2d75bc1"),
			},
			TestVCFeature{
				debugEntry: DebugEntry{
					Struct: ImageDebugDirectory{
						Characteristics:  0x0,
						TimeDateStamp:    0x5ef47ea0,
						MajorVersion:     0x0,
						MinorVersion:     0x0,
						Type:             0xc,
						SizeOfData:       0x14,
						AddressOfRawData: 0x39d58,
						PointerToRawData: 0x39158,
					},
					Info: VCFeature{
						PreVC11: 0xa,
						CCpp:    0x115,
						Gs:      0xe4,
						Sdl:     0x0,
						GuardN:  0x115,
					},
					Type: "VC Feature",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in.filepath, func(t *testing.T) {
			file := loadDebugs(t, tt.in.filepath)

			debugEntry := file.Debugs[tt.in.index]
			if !reflect.DeepEqual(debugEntry, tt.out.debugEntry) {
				t.Fatalf("debug entry assertion failed, got %+v, want %+v",
					debugEntry, tt.out.debugEntry)
			}
		})
	}
}

// TestDebugDirectoryFPO verifies parsing of FPO records, including the
// packed attribute bit fields and the frame-type string.
func TestDebugDirectoryFPO(t *testing.T) {

	type TestFPO struct {
		imgDebugEntry ImageDebugDirectory
		entriesCount  int
		debugType     string
		FPODataIndex  int
		FPOData       FPOData
		FPOFrameType  string
	}

	tests := []struct {
		in  TestDebugIn
		out TestFPO
	}{
		{
			TestDebugIn{
				index:    1,
				filepath: getAbsoluteFilePath("test/jobexec.dll"),
			},
			TestFPO{
				imgDebugEntry: ImageDebugDirectory{
					Characteristics:  0x0,
					TimeDateStamp:    0x355b8e5f,
					MajorVersion:     0x0,
					MinorVersion:     0x0,
					Type:             0x3,
					SizeOfData:       0x840,
					AddressOfRawData: 0x0,
					PointerToRawData: 0xb310,
				},
				debugType:    "FPO",
				entriesCount: 131,
				FPODataIndex: 0,
				FPOData: FPOData{
					OffsetStart: 0x1bc0,
					ProcSize:    0x22,
				},
				FPOFrameType: "FPO",
			},
		},
		{
			TestDebugIn{
				index:    1,
				filepath: getAbsoluteFilePath("test/jobexec.dll"),
			},
			TestFPO{
				imgDebugEntry: ImageDebugDirectory{
					Characteristics:  0x0,
					TimeDateStamp:    0x355b8e5f,
					MajorVersion:     0x0,
					MinorVersion:     0x0,
					Type:             0x3,
					SizeOfData:       0x840,
					AddressOfRawData: 0x0,
					PointerToRawData: 0xb310,
				},
				debugType:    "FPO",
				entriesCount: 131,
				FPODataIndex: 2,
				FPOData: FPOData{
					OffsetStart:    0x1c26,
					ProcSize:       0x267,
					NumLocals:      0x104,
					ParamsSize:     0x1,
					PrologLength:   0x16,
					SavedRegsCount: 0x3,
					HasSEH:         0x0,
					UseBP:          0x1,
					Reserved:       0x0,
					FrameType:      0x3,
				},
				FPOFrameType: "Non FPO",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in.filepath, func(t *testing.T) {
			file := loadDebugs(t, tt.in.filepath)

			imgDebugEntry := file.Debugs[tt.in.index].Struct
			if !reflect.DeepEqual(imgDebugEntry, tt.out.imgDebugEntry) {
				t.Fatalf("debug entry assertion failed, got %v, want %v",
					imgDebugEntry, tt.out.imgDebugEntry)
			}

			debugTypeString := file.Debugs[tt.in.index].Type
			if debugTypeString != tt.out.debugType {
				t.Fatalf("debug type assertion failed, got %v, want %v",
					debugTypeString, tt.out.debugType)
			}

			fpo := file.Debugs[tt.in.index].Info.([]FPOData)
			entriesCount := len(fpo)
			if entriesCount != tt.out.entriesCount {
				t.Fatalf("debug entry count failed, got %v, want %v",
					entriesCount, tt.out.entriesCount)
			}

			fpoData := fpo[tt.out.FPODataIndex]
			if !reflect.DeepEqual(fpoData, tt.out.FPOData) {
				t.Fatalf("debug FPO data entry assertion failed, got %v, want %v",
					fpoData, tt.out.FPOData)
			}

			frameType := fpoData.FrameType.String()
			if frameType != tt.out.FPOFrameType {
				t.Fatalf("debug FPO frame type string assertion failed, got %v, want %v",
					frameType, tt.out.FPOFrameType)
			}
		})
	}
}

// TestDebugSectionAttributes verifies the section-attribute friendly names.
func TestDebugSectionAttributes(t *testing.T) {

	tests := []struct {
		in  string
		out string
	}{
		{
			".00cfg", "CFG Check Functions Pointers",
		},
		{
"__undefined__", "", }, } for _, tt := range tests { t.Run(tt.out, func(t *testing.T) { secAttrString := SectionAttributeDescription(tt.in) if secAttrString != tt.out { t.Fatalf("debug section attributes description failed, got %v, want %v", secAttrString, tt.out) } }) } } ================================================ FILE: delayimports.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" ) // ImageDelayImportDescriptor represents the _IMAGE_DELAYLOAD_DESCRIPTOR structure. type ImageDelayImportDescriptor struct { // As yet, no attribute flags are defined. The linker sets this field to zero // in the image. This field can be used to extend the record by indicating // the presence of new fields, or it can be used to indicate behaviors to // the delay or unload helper functions. Attributes uint32 `json:"attributes"` // The name of the DLL to be delay-loaded resides in the read-only data // section of the image. It is referenced through the szName field. Name uint32 `json:"name"` // The handle of the DLL to be delay-loaded is in the data section of the // image. The phmod field points to the handle. The supplied delay-load // helper uses this location to store the handle to the loaded DLL. ModuleHandleRVA uint32 `json:"module_handle_rva"` // The delay import address table (IAT) is referenced by the delay import // descriptor through the pIAT field. The delay-load helper updates these // pointers with the real entry points so that the thunks are no longer in // the calling loop ImportAddressTableRVA uint32 `json:"import_address_table_rva"` // The delay import name table (INT) contains the names of the imports that // might require loading. They are ordered in the same fashion as the // function pointers in the IAT. 
ImportNameTableRVA uint32 `json:"import_name_table_rva"` // The delay bound import address table (BIAT) is an optional table of // IMAGE_THUNK_DATA items that is used along with the timestamp field // of the delay-load directory table by a post-process binding phase. BoundImportAddressTableRVA uint32 `json:"bound_import_address_table_rva"` // The delay unload import address table (UIAT) is an optional table of // IMAGE_THUNK_DATA items that the unload code uses to handle an explicit // unload request. It consists of initialized data in the read-only section // that is an exact copy of the original IAT that referred the code to the // delay-load thunks. On the unload request, the library can be freed, // the *phmod cleared, and the UIAT written over the IAT to restore // everything to its preload state. UnloadInformationTableRVA uint32 `json:"unload_information_table_rva"` // 0 if not bound, otherwise, date/time of the target DLL. TimeDateStamp uint32 `json:"time_date_stamp"` } // DelayImport represents an entry in the delay import table. type DelayImport struct { Offset uint32 `json:"offset"` Name string `json:"name"` Functions []ImportFunction `json:"functions"` Descriptor ImageDelayImportDescriptor `json:"descriptor"` } // Delay-Load Import Tables tables were added to the image to support a uniform // mechanism for applications to delay the loading of a DLL until the first call // into that DLL. The delay-load directory table is the counterpart to the // import directory table. func (pe *File) parseDelayImportDirectory(rva, size uint32) error { for { importDelayDesc := ImageDelayImportDescriptor{} fileOffset := pe.GetOffsetFromRva(rva) importDescSize := uint32(binary.Size(importDelayDesc)) err := pe.structUnpack(&importDelayDesc, fileOffset, importDescSize) // If the RVA is invalid all would blow up. Some EXEs seem to be // specially nasty and have an invalid RVA. if err != nil { return err } // If the structure is all zeros, we reached the end of the list. 
if importDelayDesc == (ImageDelayImportDescriptor{}) { break } rva += importDescSize // If the array of thunks is somewhere earlier than the import // descriptor we can set a maximum length for the array. Otherwise // just set a maximum length of the size of the file maxLen := uint32(len(pe.data)) - fileOffset if rva > importDelayDesc.ImportNameTableRVA || rva > importDelayDesc.ImportAddressTableRVA { if rva < importDelayDesc.ImportNameTableRVA { maxLen = rva - importDelayDesc.ImportAddressTableRVA } else if rva < importDelayDesc.ImportAddressTableRVA { maxLen = rva - importDelayDesc.ImportNameTableRVA } else { maxLen = Max(rva-importDelayDesc.ImportNameTableRVA, rva-importDelayDesc.ImportAddressTableRVA) } } var importedFunctions []ImportFunction if pe.Is64 { importedFunctions, err = pe.parseImports64(&importDelayDesc, maxLen) } else { importedFunctions, err = pe.parseImports32(&importDelayDesc, maxLen) } if err != nil { return err } nameRVA := uint32(0) if importDelayDesc.Attributes == 0 { nameRVA = importDelayDesc.Name - pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase } else { nameRVA = importDelayDesc.Name } dllName := pe.getStringAtRVA(nameRVA, maxLen) if !IsValidDosFilename(dllName) { dllName = "*invalid*" continue } pe.DelayImports = append(pe.DelayImports, DelayImport{ Offset: fileOffset, Name: string(dllName), Functions: importedFunctions, Descriptor: importDelayDesc, }) } if len(pe.DelayImports) > 0 { pe.HasDelayImp = true } return nil } // GetDelayImportEntryInfoByRVA return an import function + index of the entry given // an RVA. func (pe *File) GetDelayImportEntryInfoByRVA(rva uint32) (DelayImport, int) { for _, imp := range pe.DelayImports { for i, entry := range imp.Functions { if entry.ThunkRVA == rva { return imp, i } } } return DelayImport{}, 0 } ================================================ FILE: delayimports_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. 
// Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "reflect" "testing" ) type TestDelayImportEntry struct { entryCount int entryIndex int entry DelayImport } func TestDelayImportDirectory(t *testing.T) { tests := []struct { in string out TestDelayImportEntry }{ { getAbsoluteFilePath("test/000049925c578e5a0883e7d1a8257c1a44feab8f7d9972ace8d0e3fb96612a4c"), TestDelayImportEntry{ entryCount: 4, entryIndex: 0, entry: DelayImport{ Offset: 0x5F7C00, Name: "kernel32.dll", Functions: []ImportFunction{ { Name: "GetLogicalProcessorInformation", Hint: 0x0, ByOrdinal: false, OriginalThunkValue: 0x601192, ThunkValue: 0xF04E60, ThunkRVA: 0x6010B4, OriginalThunkRVA: 0x6010F0, }, }, Descriptor: ImageDelayImportDescriptor{ Attributes: 0x1, Name: 0x601184, ModuleHandleRVA: 0x6010A0, ImportAddressTableRVA: 0x6010B4, ImportNameTableRVA: 0x6010F0, BoundImportAddressTableRVA: 0x60112C, UnloadInformationTableRVA: 0x601158, TimeDateStamp: 0x0, }, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryDelayImport] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryDelayImport] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseDelayImportDirectory(va, size) if err != nil { t.Fatalf("parseDelayImportDirectory(%s) failed, reason: %v", tt.in, err) } got := file.DelayImports if len(got) != tt.out.entryCount { t.Errorf("delay imports entry count assertion failed, got %v, want %v", len(got), tt.out.entryCount) } 
if len(file.DelayImports) > 0 { delayImportEntry := file.DelayImports[tt.out.entryIndex] if !reflect.DeepEqual(delayImportEntry, tt.out.entry) { t.Errorf("delay import entry assertion failed, got %v, want %v", delayImportEntry, tt.out.entry) } } }) } } ================================================ FILE: dosheader.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" ) // ImageDOSHeader represents the DOS stub of a PE. type ImageDOSHeader struct { // Magic number. Magic uint16 `json:"magic"` // Bytes on last page of file. BytesOnLastPageOfFile uint16 `json:"bytes_on_last_page_of_file"` // Pages in file. PagesInFile uint16 `json:"pages_in_file"` // Relocations. Relocations uint16 `json:"relocations"` // Size of header in paragraphs. SizeOfHeader uint16 `json:"size_of_header"` // Minimum extra paragraphs needed. MinExtraParagraphsNeeded uint16 `json:"min_extra_paragraphs_needed"` // Maximum extra paragraphs needed. MaxExtraParagraphsNeeded uint16 `json:"max_extra_paragraphs_needed"` // Initial (relative) SS value. InitialSS uint16 `json:"initial_ss"` // Initial SP value. InitialSP uint16 `json:"initial_sp"` // Checksum. Checksum uint16 `json:"checksum"` // Initial IP value. InitialIP uint16 `json:"initial_ip"` // Initial (relative) CS value. InitialCS uint16 `json:"initial_cs"` // File address of relocation table. AddressOfRelocationTable uint16 `json:"address_of_relocation_table"` // Overlay number. OverlayNumber uint16 `json:"overlay_number"` // Reserved words. ReservedWords1 [4]uint16 `json:"reserved_words_1"` // OEM identifier. OEMIdentifier uint16 `json:"oem_identifier"` // OEM information. OEMInformation uint16 `json:"oem_information"` // Reserved words. ReservedWords2 [10]uint16 `json:"reserved_words_2"` // File address of new exe header (Elfanew). 
AddressOfNewEXEHeader uint32 `json:"address_of_new_exe_header"` } // ParseDOSHeader parses the DOS header stub. Every PE file begins with a small // MS-DOS stub. The need for this arose in the early days of Windows, before a // significant number of consumers were running it. When executed on a machine // without Windows, the program could at least print out a message saying that // Windows was required to run the executable. func (pe *File) ParseDOSHeader() (err error) { offset := uint32(0) size := uint32(binary.Size(pe.DOSHeader)) err = pe.structUnpack(&pe.DOSHeader, offset, size) if err != nil { return err } // It can be ZM on an (non-PE) EXE. // These executables still work under XP via ntvdm. if pe.DOSHeader.Magic != ImageDOSSignature && pe.DOSHeader.Magic != ImageDOSZMSignature { return ErrDOSMagicNotFound } // `e_lfanew` is the only required element (besides the signature) of the // DOS header to turn the EXE into a PE. It is is a relative offset to the // NT Headers. It can't be null (signatures would overlap). // Can be 4 at minimum. if pe.DOSHeader.AddressOfNewEXEHeader < 4 || pe.DOSHeader.AddressOfNewEXEHeader > pe.size { return ErrInvalidElfanewValue } // tiny pe has a e_lfanew of 4, which means the NT Headers is overlapping // the DOS Header. if pe.DOSHeader.AddressOfNewEXEHeader <= 0x3c { pe.Anomalies = append(pe.Anomalies, AnoPEHeaderOverlapDOSHeader) } pe.HasDOSHdr = true return nil } ================================================ FILE: dosheader_test.go ================================================ // Copyright 2022 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"testing"
)

// TestDOSHeader holds the fully-specified DOS header expected for a sample.
type TestDOSHeader struct {
	imageDOSHeader ImageDOSHeader
}

// TestParseDOSHeader verifies that a well-formed MZ executable's DOS header
// is unpacked field-for-field.
func TestParseDOSHeader(t *testing.T) {
	tests := []struct {
		in  string
		out TestDOSHeader
	}{
		{
			getAbsoluteFilePath("test/putty.exe"),
			TestDOSHeader{
				imageDOSHeader: ImageDOSHeader{
					Magic:                    0x5a4d,
					BytesOnLastPageOfFile:    0x78,
					PagesInFile:              0x1,
					Relocations:              0x0,
					SizeOfHeader:             0x4,
					MinExtraParagraphsNeeded: 0x0,
					MaxExtraParagraphsNeeded: 0x0,
					InitialSS:                0x0,
					InitialSP:                0x0,
					Checksum:                 0x0,
					InitialIP:                0x0,
					InitialCS:                0x0,
					AddressOfRelocationTable: 0x40,
					OverlayNumber:            0x0,
					ReservedWords1:           [4]uint16{},
					OEMIdentifier:            0x0,
					OEMInformation:           0x0,
					ReservedWords2:           [10]uint16{},
					AddressOfNewEXEHeader:    0x78,
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.ParseDOSHeader()
			if err != nil {
				// BUG FIX: the failure message previously said "Parse(%s)
				// failed" even though ParseDOSHeader() is what ran here,
				// which misattributed failures during triage.
				t.Fatalf("ParseDOSHeader(%s) failed, reason: %v", tt.in, err)
			}
			got := file.DOSHeader
			if got != tt.out.imageDOSHeader {
				t.Errorf("parse DOS header assertion failed, got %v, want %v",
					got, tt.out.imageDOSHeader)
			}
		})
	}
}

// TestParseDOSHeaderNonMZ verifies that a non-MZ file (an ELF binary) is
// rejected with ErrDOSMagicNotFound.
func TestParseDOSHeaderNonMZ(t *testing.T) {
	tests := []struct {
		in  string
		out error
	}{
		{
			// This is an ELF file.
			getAbsoluteFilePath("test/look"),
			ErrDOSMagicNotFound,
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.ParseDOSHeader()
			if err != tt.out {
				t.Fatalf("parsing DOS header failed, got %v, want %v", err, tt.out)
			}
		})
	}
}

================================================
FILE: dotnet.go
================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
package pe import ( "encoding/binary" ) // References // https://www.ntcore.com/files/dotnetformat.htm // COMImageFlagsType represents a COM+ header entry point flag type. type COMImageFlagsType uint32 // COM+ Header entry point flags. const ( // The image file contains IL code only, with no embedded native unmanaged // code except the start-up stub (which simply executes an indirect jump to // the CLR entry point). COMImageFlagsILOnly = 0x00000001 // The image file can be loaded only into a 32-bit process. COMImageFlags32BitRequired = 0x00000002 // This flag is obsolete and should not be set. Setting it—as the IL // assembler allows, using the .corflags directive—will render your module // un-loadable. COMImageFlagILLibrary = 0x00000004 // The image file is protected with a strong name signature. COMImageFlagsStrongNameSigned = 0x00000008 // The executable’s entry point is an unmanaged method. The EntryPointToken/ // EntryPointRVA field of the CLR header contains the RVA of this native // method. This flag was introduced in version 2.0 of the CLR. COMImageFlagsNativeEntrypoint = 0x00000010 // The CLR loader and the JIT compiler are required to track debug // information about the methods. This flag is not used. COMImageFlagsTrackDebugData = 0x00010000 // The image file can be loaded into any process, but preferably into a // 32-bit process. This flag can be only set together with flag // COMIMAGE_FLAGS_32BITREQUIRED. When set, these two flags mean the image // is platformneutral, but prefers to be loaded as 32-bit when possible. // This flag was introduced in CLR v4.0 COMImageFlags32BitPreferred = 0x00020000 ) // V-table constants. const ( // V-table slots are 32-bits in size. CORVTable32Bit = 0x01 // V-table slots are 64-bits in size. CORVTable64Bit = 0x02 // The thunk created by the common language runtime must provide data // marshaling between managed and unmanaged code. 
CORVTableFromUnmanaged = 0x04 // The thunk created by the common language runtime must provide data // marshaling between managed and unmanaged code. Current appdomain should // be selected to dispatch the call. CORVTableFromUnmanagedRetainAppDomain = 0x08 // Call most derived method described by CORVTableCallMostDerived = 0x10 ) // Metadata Tables constants. const ( // The current module descriptor. Module = 0 // Class reference descriptors. TypeRef = 1 // Class or interface definition descriptors. TypeDef = 2 // A class-to-fields lookup table, which does not exist in optimized // metadata (#~ stream). FieldPtr = 3 // Field definition descriptors. Field = 4 // A class-to-methods lookup table, which does not exist in // optimized metadata (#~ stream). MethodPtr = 5 // Method definition descriptors. MethodDef = 6 // A method-to-parameters lookup table, which does not exist in optimized // metadata (#~ stream). ParamPtr = 7 // Parameter definition descriptors. Param = 8 // Interface implementation descriptors. InterfaceImpl = 9 // Member (field or method) reference descriptors. MemberRef = 10 // Constant value descriptors that map the default values stored in the // #Blob stream to respective fields, parameters, and properties. Constant = 11 // Custom attribute descriptors. CustomAttribute = 12 // Field or parameter marshaling descriptors for managed/unmanaged // inter-operations. FieldMarshal = 13 // Security descriptors. DeclSecurity = 14 // Class layout descriptors that hold information about how the loader // should lay out respective classes. ClassLayout = 15 // Field layout descriptors that specify the offset or ordinal of // individual fields. FieldLayout = 16 // Stand-alone signature descriptors. Signatures per se are used in two // capacities: as composite signatures of local variables of methods and as // parameters of the call indirect (calli) IL instruction. StandAloneSig = 17 // A class-to-events mapping table. 
This is not an intermediate lookup // table, and it does exist in optimized metadata. EventMap = 18 // An event map–to–events lookup table, which does not exist in optimized // metadata (#~ stream). EventPtr = 19 // Event descriptors. Event = 20 // A class-to-properties mapping table. This is not an intermediate lookup // table, and it does exist in optimized metadata. PropertyMap = 21 // A property map–to–properties lookup table, which does not exist in // optimized metadata (#~ stream). PropertyPtr = 22 // Property descriptors. Property = 23 // Method semantics descriptors that hold information about which method is // associated with a specific property or event and in what capacity. MethodSemantics = 24 // Method implementation descriptors. MethodImpl = 25 // Module reference descriptors. ModuleRef = 26 // Type specification descriptors. TypeSpec = 27 // Implementation map descriptors used for the platform invocation // (P/Invoke) type of managed/unmanaged code inter-operation. ImplMap = 28 // Field-to-data mapping descriptors. FieldRVA = 29 // Edit-and-continue log descriptors that hold information about what // changes have been made to specific metadata items during in-memory // editing. This table does not exist in optimized metadata (#~ stream) ENCLog = 30 // Edit-and-continue mapping descriptors. This table does not exist in // optimized metadata (#~ stream). ENCMap = 31 // The current assembly descriptor, which should appear only in the prime // module metadata. Assembly = 32 // This table is unused. AssemblyProcessor = 33 // This table is unused. AssemblyOS = 34 // Assembly reference descriptors. AssemblyRef = 35 // This table is unused. AssemblyRefProcessor = 36 // This table is unused. AssemblyRefOS = 37 // File descriptors that contain information about other files in the // current assembly. 
FileMD = 38 // Exported type descriptors that contain information about public classes // exported by the current assembly, which are declared in other modules of // the assembly. Only the prime module of the assembly should carry this // table. ExportedType = 39 // Managed resource descriptors. ManifestResource = 40 // Nested class descriptors that provide mapping of nested classes to their // respective enclosing classes. NestedClass = 41 // Type parameter descriptors for generic (parameterized) classes and // methods. GenericParam = 42 // Generic method instantiation descriptors. MethodSpec = 43 // Descriptors of constraints specified for type parameters of generic // classes and methods GenericParamConstraint = 44 ) // Heaps Streams Bit Positions. const ( StringStream = 0 GUIDStream = 1 BlobStream = 2 ) // MetadataTableIndexToString returns the string representation of the metadata // table index. func MetadataTableIndexToString(k int) string { metadataTablesMap := map[int]string{ Module: "Module", TypeRef: "TypeRef", TypeDef: "TypeDef", FieldPtr: "FieldPtr", Field: "Field", MethodPtr: "MethodPtr", MethodDef: "MethodDef", ParamPtr: "ParamPtr", Param: "Param", InterfaceImpl: "InterfaceImpl", MemberRef: "MemberRef", Constant: "Constant", CustomAttribute: "CustomAttribute", FieldMarshal: "FieldMarshal", DeclSecurity: "DeclSecurity", ClassLayout: "ClassLayout", FieldLayout: "FieldLayout", StandAloneSig: "StandAloneSig", EventMap: "EventMap", EventPtr: "EventPtr", Event: "Event", PropertyMap: "PropertyMap", PropertyPtr: "PropertyPtr", Property: "Property", MethodSemantics: "MethodSemantics", MethodImpl: "MethodImpl", ModuleRef: "ModuleRef", TypeSpec: "TypeSpec", ImplMap: "ImplMap", FieldRVA: "FieldRVA", ENCLog: "ENCLog", ENCMap: "ENCMap", Assembly: "Assembly", AssemblyProcessor: "AssemblyProcessor", AssemblyOS: "AssemblyOS", AssemblyRef: "AssemblyRef", AssemblyRefProcessor: "AssemblyRefProcessor", AssemblyRefOS: "AssemblyRefOS", FileMD: "File", ExportedType: 
"ExportedType", ManifestResource: "ManifestResource", NestedClass: "NestedClass", GenericParam: "GenericParam", MethodSpec: "MethodSpec", GenericParamConstraint: "GenericParamConstraint", } if value, ok := metadataTablesMap[k]; ok { return value } return "" } // GetMetadataStreamIndexSize returns the size of indexes to read into a // particular heap. func (pe *File) GetMetadataStreamIndexSize(BitPosition int) int { // The `Heaps` field is a bit vector that encodes how wide indexes into the // various heaps are: // - If bit 0 is set, indexes into the "#String" heap are 4 bytes wide; // - if bit 1 is set, indexes into the "#GUID" heap are 4 bytes wide; // - if bit 2 is set, indexes into the "#Blob" heap are 4 bytes wide. heaps := pe.CLR.MetadataTablesStreamHeader.Heaps if IsBitSet(uint64(heaps), BitPosition) { return 4 } // Conversely, if the HeapSizes bit for a particular heap is not set, // indexes into that heap are 2 bytes wide. return 2 } // ImageDataDirectory represents the directory format. type ImageDataDirectory struct { // The relative virtual address of the table. VirtualAddress uint32 `json:"virtual_address"` // The size of the table, in bytes. Size uint32 `json:"size"` } // ImageCOR20Header represents the CLR 2.0 header structure. type ImageCOR20Header struct { // Size of the header in bytes. Cb uint32 `json:"cb"` // Major number of the minimum version of the runtime required to run the // program. MajorRuntimeVersion uint16 `json:"major_runtime_version"` // Minor number of the version of the runtime required to run the program. MinorRuntimeVersion uint16 `json:"minor_runtime_version"` // RVA and size of the metadata. MetaData ImageDataDirectory `json:"meta_data"` // Bitwise flags indicating attributes of this executable. Flags COMImageFlagsType `json:"flags"` // Metadata identifier (token) of the entry point for the image file; can // be 0 for DLL images. 
This field identifies a method belonging to this // module or a module containing the entry point method. // In images of version 2.0 and newer, this field may contain RVA of the // embedded native entry point method. // union { // // If COMIMAGE_FLAGS_NATIVE_ENTRYPOINT is not set, // EntryPointToken represents a managed entrypoint. // DWORD EntryPointToken; // // If COMIMAGE_FLAGS_NATIVE_ENTRYPOINT is set, // EntryPointRVA represents an RVA to a native entrypoint // DWORD EntryPointRVA; //}; EntryPointRVAorToken uint32 `json:"entry_point_rva_or_token"` // This is the blob of managed resources. Fetched using // code:AssemblyNative.GetResource and code:PEFile.GetResource and accessible // from managed code from System.Assembly.GetManifestResourceStream. The // metadata has a table that maps names to offsets into this blob, so // logically the blob is a set of resources. Resources ImageDataDirectory `json:"resources"` // RVA and size of the hash data for this PE file, used by the loader for // binding and versioning. IL assemblies can be signed with a public-private // key to validate who created it. The signature goes here if this feature // is used. StrongNameSignature ImageDataDirectory `json:"strong_name_signature"` // RVA and size of the Code Manager table. In the existing releases of the // runtime, this field is reserved and must be set to 0. CodeManagerTable ImageDataDirectory `json:"code_manager_table"` // RVA and size in bytes of an array of virtual table (v-table) fixups. // Among current managed compilers, only the VC++ linker and the IL // assembler can produce this array. VTableFixups ImageDataDirectory `json:"vtable_fixups"` // RVA and size of an array of addresses of jump thunks. Among managed // compilers, only the VC++ of versions pre-8.0 could produce this table, // which allows the export of unmanaged native methods embedded in the // managed PE file. In v2.0+ of CLR this entry is obsolete and must be set // to 0. 
ExportAddressTableJumps ImageDataDirectory `json:"export_address_table_jumps"` // Reserved for precompiled images; set to 0 // NGEN images it points at a code:CORCOMPILE_HEADER structure ManagedNativeHeader ImageDataDirectory `json:"managed_native_header"` } // ImageCORVTableFixup defines the v-table fixups that contains the // initializing information necessary for the runtime to create the thunks. // Non VOS v-table entries. Define an array of these pointed to by // IMAGE_COR20_HEADER.VTableFixups. Each entry describes a contiguous array of // v-table slots. The slots start out initialized to the meta data token value // for the method they need to call. At image load time, the CLR Loader will // turn each entry into a pointer to machine code for the CPU and can be // called directly. type ImageCORVTableFixup struct { RVA uint32 `json:"rva"` // Offset of v-table array in image. Count uint16 `json:"count"` // How many entries at location. Type uint16 `json:"type"` // COR_VTABLE_xxx type of entries. } // MetadataHeader consists of a storage signature and a storage header. type MetadataHeader struct { // The storage signature, which must be 4-byte aligned: // ”Magic” signature for physical metadata, currently 0x424A5342, or, read // as characters, BSJB—the initials of four “founding fathers” Brian Harry, // Susa Radke-Sproull, Jason Zander, and Bill Evans, who started the // runtime development in 1998. Signature uint32 `json:"signature"` // Major version. MajorVersion uint16 `json:"major_version"` // Minor version. MinorVersion uint16 `json:"minor_version"` // Reserved; set to 0. ExtraData uint32 `json:"extra_data"` // Length of the version string. VersionString uint32 `json:"version_string"` // Version string. Version string `json:"version"` // The storage header follows the storage signature, aligned on a 4-byte // boundary. // // Reserved; set to 0. Flags uint8 `json:"flags"` // Another byte used for [padding] // Number of streams. 
Streams uint16 `json:"streams"` } // MetadataStreamHeader represents a Metadata Stream Header Structure. type MetadataStreamHeader struct { // Offset in the file for this stream. Offset uint32 `json:"offset"` // Size of the stream in bytes. Size uint32 `json:"size"` // Name of the stream; a zero-terminated ASCII string no longer than 31 // characters (plus zero terminator). The name might be shorter, in which // case the size of the stream header is correspondingly reduced, padded to // the 4-byte boundary. Name string `json:"name"` } // MetadataTableStreamHeader represents the Metadata Table Stream Header Structure. type MetadataTableStreamHeader struct { // Reserved; set to 0. Reserved uint32 `json:"reserved"` // Major version of the table schema (1 for v1.0 and v1.1; 2 for v2.0 or later). MajorVersion uint8 `json:"major_version"` // Minor version of the table schema (0 for all versions). MinorVersion uint8 `json:"minor_version"` // Binary flags indicate the offset sizes to be used within the heaps. // 4-byte unsigned integer offset is indicated by: // - 0x01 for a string heap, 0x02 for a GUID heap, and 0x04 for a blob heap. // If a flag is not set, the respective heap offset is a 2-byte unsigned integer. // A #- stream can also have special flags set: // - flag 0x20, indicating that the stream contains only changes made // during an edit-and-continue session, and; // - flag 0x80, indicating that the metadata might contain items marked as // deleted. Heaps uint8 `json:"heaps"` // Bit width of the maximal record index to all tables of the metadata; // calculated at run time (during the metadata stream initialization). RID uint8 `json:"rid"` // Bit vector of present tables, each bit representing one table (1 if // present). MaskValid uint64 `json:"mask_valid"` // Bit vector of sorted tables, each bit representing a respective table (1 // if sorted) Sorted uint64 `json:"sorted"` } // MetadataTable represents the content of a particular table in the metadata. 
// The metadata schema defines 45 tables. type MetadataTable struct { // The name of the table. Name string `json:"name"` // Number of columns in the table. CountCols uint32 `json:"count_cols"` // Every table has a different layout, defined in the ECMA-335 spec. // Content abstract the type each table is pointing to. Content interface{} `json:"content"` } // CLRData embeds the Common Language Runtime Header structure as well as the // Metadata header structure. type CLRData struct { CLRHeader ImageCOR20Header `json:"clr_header"` MetadataHeader MetadataHeader `json:"metadata_header"` MetadataStreamHeaders []MetadataStreamHeader `json:"metadata_stream_headers"` MetadataStreams map[string][]byte `json:"-"` MetadataTablesStreamHeader MetadataTableStreamHeader `json:"metadata_tables_stream_header"` MetadataTables map[int]*MetadataTable `json:"metadata_tables"` StringStreamIndexSize int `json:"-"` GUIDStreamIndexSize int `json:"-"` BlobStreamIndexSize int `json:"-"` } func (pe *File) parseMetadataStream(off, size uint32) (MetadataTableStreamHeader, error) { mdTableStreamHdr := MetadataTableStreamHeader{} if size == 0 { return mdTableStreamHdr, nil } mdTableStreamHdrSize := uint32(binary.Size(mdTableStreamHdr)) err := pe.structUnpack(&mdTableStreamHdr, off, mdTableStreamHdrSize) if err != nil { return mdTableStreamHdr, err } return mdTableStreamHdr, nil } func (pe *File) parseMetadataHeader(offset, size uint32) (MetadataHeader, error) { var err error mh := MetadataHeader{} if mh.Signature, err = pe.ReadUint32(offset); err != nil { return mh, err } if mh.MajorVersion, err = pe.ReadUint16(offset + 4); err != nil { return mh, err } if mh.MinorVersion, err = pe.ReadUint16(offset + 6); err != nil { return mh, err } if mh.ExtraData, err = pe.ReadUint32(offset + 8); err != nil { return mh, err } if mh.VersionString, err = pe.ReadUint32(offset + 12); err != nil { return mh, err } mh.Version, err = pe.getStringAtOffset(offset+16, mh.VersionString) if err != nil { return mh, err } 
offset += 16 + mh.VersionString if mh.Flags, err = pe.ReadUint8(offset); err != nil { return mh, err } if mh.Streams, err = pe.ReadUint16(offset + 2); err != nil { return mh, err } return mh, err } // The 15th directory entry of the PE header contains the RVA and size of the // runtime header in the image file. The runtime header, which contains all of // the runtime-specific data entries and other information, should reside in a // read-only section of the image file. The IL assembler puts the common // language runtime header in the .text section. func (pe *File) parseCLRHeaderDirectory(rva, size uint32) error { clrHeader := ImageCOR20Header{} offset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&clrHeader, offset, size) if err != nil { return err } pe.CLR.CLRHeader = clrHeader if clrHeader.MetaData.VirtualAddress == 0 || clrHeader.MetaData.Size == 0 { return nil } // If we get a CLR header, we assume that this is enough // to say we have a CLR data to show even if parsing // other structures fails later. pe.HasCLR = true if pe.opts.OmitCLRMetadata { return nil } offset = pe.GetOffsetFromRva(clrHeader.MetaData.VirtualAddress) mh, err := pe.parseMetadataHeader(offset, clrHeader.MetaData.Size) if err != nil { return err } pe.CLR.MetadataHeader = mh pe.CLR.MetadataStreams = make(map[string][]byte) offset += 16 + mh.VersionString + 4 // Immediately following the MetadataHeader is a series of Stream Headers. // A “stream” is to the metadata what a “section” is to the assembly. The // NumberOfStreams property indicates how many StreamHeaders to read. mdStreamHdrOff := uint32(0) mdStreamHdrSize := uint32(0) for i := uint16(0); i < mh.Streams; i++ { sh := MetadataStreamHeader{} if sh.Offset, err = pe.ReadUint32(offset); err != nil { return err } if sh.Size, err = pe.ReadUint32(offset + 4); err != nil { return err } // Name requires a special treatment. 
offset += 8 for j := uint32(0); j <= 32; j++ { var c uint8 if c, err = pe.ReadUint8(offset); err != nil { return err } offset++ if c == 0 && (j+1)%4 == 0 { break } if c != 0 { sh.Name += string(c) } } // The streams #~ and #- are mutually exclusive; that is, the metadata // structure of the module is either optimized or un-optimized; it // cannot be both at the same time or be something in between. if sh.Name == "#~" || sh.Name == "#-" { mdStreamHdrOff = sh.Offset mdStreamHdrSize = sh.Size } rva = clrHeader.MetaData.VirtualAddress + sh.Offset start := pe.GetOffsetFromRva(rva) // Some malformed/Corrupt PEs has invalid sizes on sh. mdStreamBytes := make([]byte, 0) if start+sh.Size <= uint32(len(pe.data)) { mdStreamBytes = pe.data[start : start+sh.Size] } // Save the stream into a map []byte. pe.CLR.MetadataStreams[sh.Name] = mdStreamBytes pe.CLR.MetadataStreamHeaders = append(pe.CLR.MetadataStreamHeaders, sh) } // Get the Metadata Table Stream. if mdStreamHdrSize == 0 { return nil } // The .Offset indicated by the stream header is an RVA relative to the // metadataDirectoryAddress in the CLRHeader. rva = clrHeader.MetaData.VirtualAddress + mdStreamHdrOff offset = pe.GetOffsetFromRva(rva) mdTableStreamHdr, err := pe.parseMetadataStream(offset, mdStreamHdrSize) if err != nil { return nil } pe.CLR.MetadataTablesStreamHeader = mdTableStreamHdr // Get the size of indexes of #String", "#GUID" and "#Blob" streams. pe.CLR.StringStreamIndexSize = pe.GetMetadataStreamIndexSize(StringStream) pe.CLR.GUIDStreamIndexSize = pe.GetMetadataStreamIndexSize(GUIDStream) pe.CLR.BlobStreamIndexSize = pe.GetMetadataStreamIndexSize(BlobStream) // This header is followed by a sequence of 4-byte unsigned integers // indicating the number of records in each table marked 1 in the MaskValid // bit vector. 
offset += uint32(binary.Size(mdTableStreamHdr)) pe.CLR.MetadataTables = make(map[int]*MetadataTable) for i := 0; i <= GenericParamConstraint; i++ { if IsBitSet(mdTableStreamHdr.MaskValid, i) { mdTable := MetadataTable{} mdTable.Name = MetadataTableIndexToString(i) mdTable.CountCols, err = pe.ReadUint32(offset) if err != nil { break } offset += 4 pe.CLR.MetadataTables[i] = &mdTable } } // Parse the metadata tables. for tableIndex := 0; tableIndex <= GenericParamConstraint; tableIndex++ { table, ok := pe.CLR.MetadataTables[tableIndex] if !ok { continue } n := uint32(0) switch tableIndex { case Module: // 0x00 table.Content, n, err = pe.parseMetadataModuleTable(offset) case TypeRef: // 0x01 table.Content, n, err = pe.parseMetadataTypeRefTable(offset) case TypeDef: // 0x02 table.Content, n, err = pe.parseMetadataTypeDefTable(offset) case Field: // 0x04 table.Content, n, err = pe.parseMetadataFieldTable(offset) case MethodDef: // 0x06 table.Content, n, err = pe.parseMetadataMethodDefTable(offset) case Param: // 0x08 table.Content, n, err = pe.parseMetadataParamTable(offset) case InterfaceImpl: // 0x09 table.Content, n, err = pe.parseMetadataInterfaceImplTable(offset) case MemberRef: // 0x0a table.Content, n, err = pe.parseMetadataMemberRefTable(offset) case Constant: // 0x0b table.Content, n, err = pe.parseMetadataConstantTable(offset) case CustomAttribute: // 0x0c table.Content, n, err = pe.parseMetadataCustomAttributeTable(offset) case FieldMarshal: // 0x0d table.Content, n, err = pe.parseMetadataFieldMarshalTable(offset) case DeclSecurity: // 0x0e table.Content, n, err = pe.parseMetadataDeclSecurityTable(offset) case ClassLayout: // 0x0f table.Content, n, err = pe.parseMetadataClassLayoutTable(offset) case FieldLayout: // 0x10 table.Content, n, err = pe.parseMetadataFieldLayoutTable(offset) case StandAloneSig: // 0x11 table.Content, n, err = pe.parseMetadataStandAloneSignTable(offset) case EventMap: // 0x12 table.Content, n, err = 
pe.parseMetadataEventMapTable(offset) case Event: // 0x14 table.Content, n, err = pe.parseMetadataEventTable(offset) case PropertyMap: // 0x15 table.Content, n, err = pe.parseMetadataPropertyMapTable(offset) case Property: // 0x17 table.Content, n, err = pe.parseMetadataPropertyTable(offset) case MethodSemantics: // 0x18 table.Content, n, err = pe.parseMetadataMethodSemanticsTable(offset) case MethodImpl: // 0x19 table.Content, n, err = pe.parseMetadataMethodImplTable(offset) case ModuleRef: // 0x1a table.Content, n, err = pe.parseMetadataModuleRefTable(offset) case TypeSpec: // 0x1b table.Content, n, err = pe.parseMetadataTypeSpecTable(offset) case ImplMap: // 0x1c table.Content, n, err = pe.parseMetadataImplMapTable(offset) case FieldRVA: // 0x1d table.Content, n, err = pe.parseMetadataFieldRVATable(offset) case Assembly: // 0x20 table.Content, n, err = pe.parseMetadataAssemblyTable(offset) case AssemblyRef: // 0x23 table.Content, n, err = pe.parseMetadataAssemblyRefTable(offset) case ExportedType: // 0x27 table.Content, n, err = pe.parseMetadataExportedTypeTable(offset) case ManifestResource: // 0x28 table.Content, n, err = pe.parseMetadataManifestResourceTable(offset) case NestedClass: // 0x29 table.Content, n, err = pe.parseMetadataNestedClassTable(offset) case GenericParam: // 0x2a table.Content, n, err = pe.parseMetadataGenericParamTable(offset) case MethodSpec: // 0x2b table.Content, n, err = pe.parseMetadataMethodSpecTable(offset) case GenericParamConstraint: // 0x2c table.Content, n, err = pe.parseMetadataGenericParamConstraintTable(offset) case FileMD: // 0x26 table.Content, n, err = pe.parseMetadataFileTable(offset) default: pe.logger.Warnf("unhandled metadata table %d %s offset 0x%x cols %d", tableIndex, MetadataTableIndexToString(tableIndex), offset, table.CountCols) } if err != nil { pe.logger.Warnf("parsing metadata table %s failed with %v", MetadataTableIndexToString(tableIndex), err) } offset += n } return nil } // String returns a string 
interpretation of a COMImageFlags type. func (flags COMImageFlagsType) String() []string { COMImageFlags := map[COMImageFlagsType]string{ COMImageFlagsILOnly: "IL Only", COMImageFlags32BitRequired: "32-Bit Required", COMImageFlagILLibrary: "IL Library", COMImageFlagsStrongNameSigned: "Strong Name Signed", COMImageFlagsNativeEntrypoint: "Native Entrypoint", COMImageFlagsTrackDebugData: "Track Debug Data", COMImageFlags32BitPreferred: "32-Bit Preferred", } var values []string for k, v := range COMImageFlags { if (k & flags) == k { values = append(values, v) } } return values } ================================================ FILE: dotnet_helper.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe const ( // these are intentionally made so they do not collide with StringStream, GUIDStream, and BlobStream // they are used only for the getCodedIndexSize function idxStringStream = iota + 100 idxGUIDStream idxBlobStream ) type codedidx struct { tagbits uint8 idx []int } var ( idxTypeDefOrRef = codedidx{tagbits: 2, idx: []int{TypeDef, TypeRef, TypeSpec}} idxResolutionScope = codedidx{tagbits: 2, idx: []int{Module, ModuleRef, AssemblyRef, TypeRef}} idxMemberRefParent = codedidx{tagbits: 3, idx: []int{TypeDef, TypeRef, ModuleRef, MethodDef, TypeSpec}} idxHasConstant = codedidx{tagbits: 2, idx: []int{Field, Param, Property}} idxHasCustomAttributes = codedidx{tagbits: 5, idx: []int{MethodDef, Field, TypeRef, TypeDef, Param, InterfaceImpl, MemberRef, Module, DeclSecurity, Property, Event, StandAloneSig, ModuleRef, TypeSpec, Assembly, AssemblyRef, FileMD, ExportedType, ManifestResource, GenericParam, GenericParamConstraint, MethodSpec}} idxCustomAttributeType = codedidx{tagbits: 3, idx: []int{MethodDef, MemberRef}} idxHasFieldMarshall = codedidx{tagbits: 1, idx: []int{Field, Param}} idxHasDeclSecurity = 
codedidx{tagbits: 2, idx: []int{TypeDef, MethodDef, Assembly}} idxHasSemantics = codedidx{tagbits: 1, idx: []int{Event, Property}} idxMethodDefOrRef = codedidx{tagbits: 1, idx: []int{MethodDef, MemberRef}} idxMemberForwarded = codedidx{tagbits: 1, idx: []int{Field, MethodDef}} idxImplementation = codedidx{tagbits: 2, idx: []int{FileMD, AssemblyRef, ExportedType}} idxTypeOrMethodDef = codedidx{tagbits: 1, idx: []int{TypeDef, MethodDef}} idxField = codedidx{tagbits: 0, idx: []int{Field}} idxMethodDef = codedidx{tagbits: 0, idx: []int{MethodDef}} idxParam = codedidx{tagbits: 0, idx: []int{Param}} idxTypeDef = codedidx{tagbits: 0, idx: []int{TypeDef}} idxEvent = codedidx{tagbits: 0, idx: []int{Event}} idxProperty = codedidx{tagbits: 0, idx: []int{Property}} idxModuleRef = codedidx{tagbits: 0, idx: []int{ModuleRef}} idxGenericParam = codedidx{tagbits: 0, idx: []int{GenericParam}} idxString = codedidx{tagbits: 0, idx: []int{idxStringStream}} idxBlob = codedidx{tagbits: 0, idx: []int{idxBlobStream}} idxGUID = codedidx{tagbits: 0, idx: []int{idxGUIDStream}} ) func (pe *File) getCodedIndexSize(tagbits uint32, idx ...int) uint32 { // special case String/GUID/Blob streams switch idx[0] { case int(idxStringStream): return uint32(pe.GetMetadataStreamIndexSize(StringStream)) case int(idxGUIDStream): return uint32(pe.GetMetadataStreamIndexSize(GUIDStream)) case int(idxBlobStream): return uint32(pe.GetMetadataStreamIndexSize(BlobStream)) } // now deal with coded indices or single table var maxIndex16 uint32 = 1 << (16 - tagbits) var maxColumnCount uint32 for _, tblidx := range idx { tbl, ok := pe.CLR.MetadataTables[tblidx] if ok { if tbl.CountCols > maxColumnCount { maxColumnCount = tbl.CountCols } } } if maxColumnCount >= maxIndex16 { return 4 } return 2 } func (pe *File) readFromMetadataStream(cidx codedidx, off uint32, out *uint32) (uint32, error) { indexSize := pe.getCodedIndexSize(uint32(cidx.tagbits), cidx.idx...) 
var data uint32 var err error switch indexSize { case 2: d, err := pe.ReadUint16(off) if err != nil { return 0, err } data = uint32(d) case 4: data, err = pe.ReadUint32(off) if err != nil { return 0, err } } *out = data return uint32(indexSize), nil } ================================================ FILE: dotnet_metadata_tables.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe // the struct definition and comments are from the ECMA-335 spec 6th edition // https://www.ecma-international.org/wp-content/uploads/ECMA-335_6th_edition_june_2012.pdf // Module 0x00 type ModuleTableRow struct { // a 2-byte value, reserved, shall be zero Generation uint16 `json:"generation"` // an index into the String heap Name uint32 `json:"name"` // an index into the Guid heap; simply a Guid used to distinguish between // two versions of the same module Mvid uint32 `json:"mvid"` // an index into the Guid heap; reserved, shall be zero EncID uint32 `json:"enc_id"` // an index into the Guid heap; reserved, shall be zero EncBaseID uint32 `json:"enc_base_id"` } // Module 0x00 func (pe *File) parseMetadataModuleTable(off uint32) ([]ModuleTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Module].CountCols) rows := make([]ModuleTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Generation, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxGUID, off, &rows[i].Mvid); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxGUID, off, &rows[i].EncID); err != nil { return rows, n, 
err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxGUID, off, &rows[i].EncBaseID); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // TypeRef 0x01 type TypeRefTableRow struct { // an index into a Module, ModuleRef, AssemblyRef or TypeRef table, or null; // more precisely, a ResolutionScope (§II.24.2.6) coded index. ResolutionScope uint32 `json:"resolution_scope"` // an index into the String heap TypeName uint32 `json:"type_name"` // an index into the String heap TypeNamespace uint32 `json:"type_namespace"` } // TypeRef 0x01 func (pe *File) parseMetadataTypeRefTable(off uint32) ([]TypeRefTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[TypeRef].CountCols) rows := make([]TypeRefTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxResolutionScope, off, &rows[i].ResolutionScope); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].TypeName); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].TypeNamespace); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // TypeDef 0x02 type TypeDefTableRow struct { // a 4-byte bitmask of type TypeAttributes, §II.23.1.15 Flags uint32 `json:"flags"` // an index into the String heap TypeName uint32 `json:"type_name"` // an index into the String heap TypeNamespace uint32 `json:"type_namespace"` // an index into the TypeDef, TypeRef, or TypeSpec table; more precisely, // a TypeDefOrRef (§II.24.2.6) coded index Extends uint32 `json:"extends"` // an index into the Field table; it marks the first of a contiguous run // of Fields owned by this Type FieldList uint32 `json:"field_list"` // an index into the MethodDef table; it marks the first of 
a contiguous // run of Methods owned by this Type MethodList uint32 `json:"method_list"` } // TypeDef 0x02 func (pe *File) parseMetadataTypeDefTable(off uint32) ([]TypeDefTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[TypeDef].CountCols) rows := make([]TypeDefTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Flags, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].TypeName); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].TypeNamespace); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxTypeDefOrRef, off, &rows[i].Extends); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxField, off, &rows[i].FieldList); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxMethodDef, off, &rows[i].MethodList); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // Field 0x04 type FieldTableRow struct { // a 2-byte bitmask of type FieldAttributes, §II.23.1.5 Flags uint16 `json:"flags"` // an index into the String heap Name uint32 `json:"name"` // an index into the Blob heap Signature uint32 `json:"signature"` } // Field 0x04 func (pe *File) parseMetadataFieldTable(off uint32) ([]FieldTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Field].CountCols) rows := make([]FieldTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Flags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off 
+= indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Signature); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // MethodDef 0x06 type MethodDefTableRow struct { // a 4-byte constant RVA uint32 `json:"rva"` // a 2-byte bitmask of type MethodImplAttributes, §II.23.1.10 ImplFlags uint16 `json:"impl_flags"` // a 2-byte bitmask of type MethodAttributes, §II.23.1.10 Flags uint16 `json:"flags"` // an index into the String heap Name uint32 `json:"name"` // an index into the Blob heap Signature uint32 `json:"signature"` // an index into the Param table ParamList uint32 `json:"param_list"` } // MethodDef 0x06 func (pe *File) parseMetadataMethodDefTable(off uint32) ([]MethodDefTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[MethodDef].CountCols) rows := make([]MethodDefTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].RVA, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if rows[i].ImplFlags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].Flags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Signature); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxParam, off, &rows[i].ParamList); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // Param 0x08 type ParamTableRow struct { // a 2-byte bitmask of type ParamAttributes, §II.23.1.13 Flags uint16 `json:"flags"` // a 2-byte constant Sequence uint16 `json:"sequence"` // an index into the String heap Name uint32 `json:"name"` } // Param 0x08 func (pe 
*File) parseMetadataParamTable(off uint32) ([]ParamTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Param].CountCols) rows := make([]ParamTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Flags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].Sequence, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // InterfaceImpl 0x09 type InterfaceImplTableRow struct { // an index into the TypeDef table Class uint32 `json:"class"` // an index into the TypeDef, TypeRef, or TypeSpec table; more precisely, // a TypeDefOrRef (§II.24.2.6) coded index Interface uint32 `json:"interface"` } // InterfaceImpl 0x09 func (pe *File) parseMetadataInterfaceImplTable(off uint32) ([]InterfaceImplTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[InterfaceImpl].CountCols) rows := make([]InterfaceImplTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].Class); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxTypeDefOrRef, off, &rows[i].Interface); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // MembersRef 0x0a type MemberRefTableRow struct { // an index into the MethodDef, ModuleRef,TypeDef, TypeRef, or TypeSpec // tables; more precisely, a MemberRefParent (§II.24.2.6) coded index Class uint32 `json:"class"` // // an index into the String heap Name uint32 `json:"name"` // an index into the Blob heap Signature uint32 `json:"signature"` } // MembersRef 0x0a func (pe *File) parseMetadataMemberRefTable(off uint32) ([]MemberRefTableRow, uint32, error) 
{ var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[MemberRef].CountCols) rows := make([]MemberRefTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxMemberRefParent, off, &rows[i].Class); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Signature); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // Constant 0x0b type ConstantTableRow struct { // a 1-byte constant, followed by a 1-byte padding zero Type uint8 `json:"type"` // padding zero Padding uint8 `json:"padding"` // padding zero // an index into the Param, Field, or Property table; more precisely, // a HasConstant (§II.24.2.6) coded index Parent uint32 `json:"parent"` // an index into the Blob heap Value uint32 `json:"value"` } // Constant 0x0b func (pe *File) parseMetadataConstantTable(off uint32) ([]ConstantTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Constant].CountCols) rows := make([]ConstantTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Type, err = pe.ReadUint8(off); err != nil { return rows, n, err } off += 1 n += 1 if rows[i].Padding, err = pe.ReadUint8(off); err != nil { return rows, n, err } off += 1 n += 1 if indexSize, err = pe.readFromMetadataStream(idxHasConstant, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Value); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // CustomAttribute 0x0c type CustomAttributeTableRow struct { // an index into a metadata table that has an associated HasCustomAttribute // 
(§II.24.2.6) coded index Parent uint32 `json:"parent"` // an index into the MethodDef or MemberRef table; more precisely, // a CustomAttributeType (§II.24.2.6) coded index Type uint32 `json:"type"` // an index into the Blob heap Value uint32 `json:"value"` } // CustomAttribute 0x0c func (pe *File) parseMetadataCustomAttributeTable(off uint32) ([]CustomAttributeTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[CustomAttribute].CountCols) rows := make([]CustomAttributeTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxHasCustomAttributes, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxCustomAttributeType, off, &rows[i].Type); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Value); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // FieldMarshal 0x0d type FieldMarshalTableRow struct { // an index into Field or Param table; more precisely, // a HasFieldMarshal (§II.24.2.6) coded index Parent uint32 `json:"parent"` // an index into the Blob heap NativeType uint32 `json:"native_type"` } // FieldMarshal 0x0d func (pe *File) parseMetadataFieldMarshalTable(off uint32) ([]FieldMarshalTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[FieldMarshal].CountCols) rows := make([]FieldMarshalTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxHasFieldMarshall, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].NativeType); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // DeclSecurity 0x0e type 
DeclSecurityTableRow struct { // a 2-byte value Action uint16 `json:"action"` // an index into the TypeDef, MethodDef, or Assembly table; // more precisely, a HasDeclSecurity (§II.24.2.6) coded index Parent uint32 `json:"parent"` // // an index into the Blob heap PermissionSet uint32 `json:"permission_set"` } // DeclSecurity 0x0e func (pe *File) parseMetadataDeclSecurityTable(off uint32) ([]DeclSecurityTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[DeclSecurity].CountCols) rows := make([]DeclSecurityTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Action, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxHasDeclSecurity, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].PermissionSet); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // ClassLayout 0x0f type ClassLayoutTableRow struct { // a 2-byte constant PackingSize uint16 `json:"packing_size"` // a 4-byte constant ClassSize uint32 `json:"class_size"` // an index into the TypeDef table Parent uint32 `json:"parent"` } // ClassLayout 0x0f func (pe *File) parseMetadataClassLayoutTable(off uint32) ([]ClassLayoutTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[ClassLayout].CountCols) rows := make([]ClassLayoutTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].PackingSize, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].ClassSize, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // FieldLayout 0x10 type 
FieldLayoutTableRow struct { Offset uint32 `json:"offset"` // a 4-byte constant Field uint32 `json:"field"` // an index into the Field table } // FieldLayout 0x10 func (pe *File) parseMetadataFieldLayoutTable(off uint32) ([]FieldLayoutTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[FieldLayout].CountCols) rows := make([]FieldLayoutTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Offset, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxField, off, &rows[i].Field); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // StandAloneSig 0x11 type StandAloneSigTableRow struct { Signature uint32 `json:"signature"` // an index into the Blob heap } // StandAloneSig 0x11 func (pe *File) parseMetadataStandAloneSignTable(off uint32) ([]StandAloneSigTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[StandAloneSig].CountCols) rows := make([]StandAloneSigTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Signature); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // EventMap 0x12 type EventMapTableRow struct { // an index into the TypeDef table Parent uint32 `json:"parent"` // an index into the Event table EventList uint32 `json:"event_list"` } // EventMap 0x12 func (pe *File) parseMetadataEventMapTable(off uint32) ([]EventMapTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[EventMap].CountCols) rows := make([]EventMapTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = 
pe.readFromMetadataStream(idxEvent, off, &rows[i].EventList); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // Event 0x14 type EventTableRow struct { // a 2-byte bitmask of type EventAttributes, §II.23.1.4 EventFlags uint16 `json:"event_flags"` // an index into the String heap Name uint32 `json:"name"` // an index into a TypeDef, a TypeRef, or TypeSpec table; more precisely, // a TypeDefOrRef (§II.24.2.6) coded index) EventType uint32 `json:"event_type"` } // Event 0x14 func (pe *File) parseMetadataEventTable(off uint32) ([]EventTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Event].CountCols) rows := make([]EventTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].EventFlags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxTypeDefOrRef, off, &rows[i].EventType); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // PropertyMap 0x15 type PropertyMapTableRow struct { // an index into the TypeDef table Parent uint32 `json:"parent"` // an index into the Property table PropertyList uint32 `json:"property_list"` } // PropertyMap 0x15 func (pe *File) parseMetadataPropertyMapTable(off uint32) ([]PropertyMapTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[PropertyMap].CountCols) rows := make([]PropertyMapTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].Parent); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxProperty, off, &rows[i].PropertyList); err != nil { return rows, n, err } off += 
indexSize n += indexSize } return rows, n, nil } // Property 0x17 type PropertyTableRow struct { // a 2-byte bitmask of type PropertyAttributes, §II.23.1.14 Flags uint16 `json:"flags"` // an index into the String heap Name uint32 `json:"name"` // an index into the Blob heap Type uint32 `json:"type"` } // Property 0x17 func (pe *File) parseMetadataPropertyTable(off uint32) ([]PropertyTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Property].CountCols) rows := make([]PropertyTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Flags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Type); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // MethodSemantics 0x18 type MethodSemanticsTableRow struct { // a 2-byte bitmask of type MethodSemanticsAttributes, §II.23.1.12 Semantics uint16 `json:"semantics"` // an index into the MethodDef table Method uint32 `json:"method"` // an index into the Event or Property table; more precisely, // a HasSemantics (§II.24.2.6) coded index Association uint32 `json:"association"` } // MethodSemantics 0x18 func (pe *File) parseMetadataMethodSemanticsTable(off uint32) ([]MethodSemanticsTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[MethodSemantics].CountCols) rows := make([]MethodSemanticsTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Semantics, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxMethodDef, off, &rows[i].Method); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = 
pe.readFromMetadataStream(idxHasSemantics, off, &rows[i].Association); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // MethodImpl 0x19 type MethodImplTableRow struct { // an index into the TypeDef table Class uint32 `json:"class"` // an index into the MethodDef or MemberRef table; more precisely, a // MethodDefOrRef (§II.24.2.6) coded index MethodBody uint32 `json:"method_body"` // // an index into the MethodDef or MemberRef table; more precisely, a // MethodDefOrRef (§II.24.2.6) coded index MethodDeclaration uint32 `json:"method_declaration"` } // MethodImpl 0x19 func (pe *File) parseMetadataMethodImplTable(off uint32) ([]MethodImplTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[MethodImpl].CountCols) rows := make([]MethodImplTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].Class); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxMethodDefOrRef, off, &rows[i].MethodBody); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxMethodDefOrRef, off, &rows[i].MethodDeclaration); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // ModuleRef 0x1a type ModuleRefTableRow struct { // an index into the String heap Name uint32 `json:"name"` } // ModuleRef 0x1a func (pe *File) parseMetadataModuleRefTable(off uint32) ([]ModuleRefTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[ModuleRef].CountCols) rows := make([]ModuleRefTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // TypeSpec 0x1b type 
TypeSpecTableRow struct { // an index into the Blob heap Signature uint32 `json:"signature"` } // TypeSpec 0x1b func (pe *File) parseMetadataTypeSpecTable(off uint32) ([]TypeSpecTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[TypeSpec].CountCols) rows := make([]TypeSpecTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Signature); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // ImplMap 0x1c type ImplMapTableRow struct { // a 2-byte bitmask of type PInvokeAttributes, §23.1.8 MappingFlags uint16 `json:"mapping_flags"` // an index into the Field or MethodDef table; more precisely, // a MemberForwarded (§II.24.2.6) coded index) MemberForwarded uint32 `json:"member_forwarded"` // an index into the String heap ImportName uint32 `json:"import_name"` // an index into the ModuleRef table ImportScope uint32 `json:"import_scope"` } // ImplMap 0x1c func (pe *File) parseMetadataImplMapTable(off uint32) ([]ImplMapTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[ImplMap].CountCols) rows := make([]ImplMapTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].MappingFlags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxMemberForwarded, off, &rows[i].MemberForwarded); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].ImportName); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxModuleRef, off, &rows[i].ImportScope); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // FieldRVA 0x1d type FieldRVATableRow struct { // 4-byte constant RVA uint32 `json:"rva"` // an 
index into Field table Field uint32 `json:"field"` } // FieldRVA 0x1d func (pe *File) parseMetadataFieldRVATable(off uint32) ([]FieldRVATableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[FieldRVA].CountCols) rows := make([]FieldRVATableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].RVA, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxField, off, &rows[i].Field); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // Assembly 0x20 type AssemblyTableRow struct { // a 4-byte constant of type AssemblyHashAlgorithm, §II.23.1.1 HashAlgId uint32 `json:"hash_alg_id"` // a 2-byte constant MajorVersion uint16 `json:"major_version"` // a 2-byte constant MinorVersion uint16 `json:"minor_version"` // a 2-byte constant BuildNumber uint16 `json:"build_number"` // a 2-byte constant RevisionNumber uint16 `json:"revision_number"` // a 4-byte bitmask of type AssemblyFlags, §II.23.1.2 Flags uint32 `json:"flags"` // an index into the Blob heap PublicKey uint32 `json:"public_key"` // an index into the String heap Name uint32 `json:"name"` // an index into the String heap Culture uint32 `json:"culture"` } // Assembly 0x20 func (pe *File) parseMetadataAssemblyTable(off uint32) ([]AssemblyTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[Assembly].CountCols) rows := make([]AssemblyTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].HashAlgId, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if rows[i].MajorVersion, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].MinorVersion, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].BuildNumber, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if 
rows[i].RevisionNumber, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].Flags, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].PublicKey); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Culture); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // AssemblyProcessor 0x21 type AssemblyProcessorTableRow struct { Processor uint32 `json:"processor"` // a 4-byte constant } // AssemblyOS 0x22 type AssemblyOSTableRow struct { OSPlatformID uint32 `json:"os_platform_id"` // a 4-byte constant OSMajorVersion uint32 `json:"os_major_version"` // a 4-byte constant OSMinorVersion uint32 `json:"os_minor_version"` // a 4-byte constant } // AssemblyRef 0x23 type AssemblyRefTableRow struct { MajorVersion uint16 `json:"major_version"` // a 2-byte constant MinorVersion uint16 `json:"minor_version"` // a 2-byte constant BuildNumber uint16 `json:"build_number"` // a 2-byte constant RevisionNumber uint16 `json:"revision_number"` // a 2-byte constant Flags uint32 `json:"flags"` // a 4-byte bitmask of type AssemblyFlags, §II.23.1.2 PublicKeyOrToken uint32 `json:"public_key_or_token"` // an index into the Blob heap, indicating the public key or token that identifies the author of this Assembly Name uint32 `json:"name"` // an index into the String heap Culture uint32 `json:"culture"` // an index into the String heap HashValue uint32 `json:"hash_value"` // an index into the Blob heap } // AssemblyRef 0x23 func (pe *File) parseMetadataAssemblyRefTable(off uint32) ([]AssemblyRefTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := 
int(pe.CLR.MetadataTables[AssemblyRef].CountCols) rows := make([]AssemblyRefTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].MajorVersion, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].MinorVersion, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].BuildNumber, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].RevisionNumber, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].Flags, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].PublicKeyOrToken); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Culture); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].HashValue); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // AssemblyRefProcessor 0x24 type AssemblyRefProcessorTableRow struct { Processor uint32 `json:"processor"` // a 4-byte constant AssemblyRef uint32 `json:"assembly_ref"` // an index into the AssemblyRef table } // AssemblyRefOS 0x25 type AssemblyRefOSTableRow struct { OSPlatformID uint32 `json:"os_platform_id"` // a 4-byte constant OSMajorVersion uint32 `json:"os_major_version"` // a 4-byte constant OSMinorVersion uint32 `json:"os_minor_version"` // a 4-byte constan) AssemblyRef uint32 `json:"assembly_ref"` // an index into the AssemblyRef table } // ExportedType 0x27 type ExportedTypeTableRow struct { Flags uint32 `json:"flags"` // a 4-byte bitmask of type TypeAttributes, §II.23.1.15 TypeDefId uint32 `json:"type_def_id"` // a 4-byte 
index into a TypeDef table of another module in this Assembly TypeName uint32 `json:"type_name"` // an index into the String heap TypeNamespace uint32 `json:"type_namespace"` // an index into the String heap Implementation uint32 `json:"implementation"` // an index (more precisely, an Implementation (§II.24.2.6) coded index } // ExportedType 0x27 func (pe *File) parseMetadataExportedTypeTable(off uint32) ([]ExportedTypeTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[ExportedType].CountCols) rows := make([]ExportedTypeTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Flags, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if rows[i].TypeDefId, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].TypeName); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].TypeNamespace); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxImplementation, off, &rows[i].Implementation); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // ManifestResource 0x28 type ManifestResourceTableRow struct { Offset uint32 `json:"offset"` // a 4-byte constant Flags uint32 `json:"flags"` // a 4-byte bitmask of type ManifestResourceAttributes, §II.23.1.9 Name uint32 `json:"name"` // an index into the String heap Implementation uint32 `json:"implementation"` // an index into a File table, a AssemblyRef table, or null; more precisely, an Implementation (§II.24.2.6) coded index } // ManifestResource 0x28 func (pe *File) parseMetadataManifestResourceTable(off uint32) ([]ManifestResourceTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := 
int(pe.CLR.MetadataTables[ManifestResource].CountCols) rows := make([]ManifestResourceTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Offset, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if rows[i].Flags, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxImplementation, off, &rows[i].Implementation); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // NestedClass 0x29 type NestedClassTableRow struct { NestedClass uint32 `json:"nested_class"` // an index into the TypeDef table EnclosingClass uint32 `json:"enclosing_class"` // an index into the TypeDef table } // NestedClass 0x29 func (pe *File) parseMetadataNestedClassTable(off uint32) ([]NestedClassTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[NestedClass].CountCols) rows := make([]NestedClassTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].NestedClass); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxTypeDef, off, &rows[i].EnclosingClass); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // GenericParam 0x2a type GenericParamTableRow struct { Number uint16 `json:"number"` // the 2-byte index of the generic parameter, numbered left-to-right, from zero Flags uint16 `json:"flags"` // a 2-byte bitmask of type GenericParamAttributes, §II.23.1.7 Owner uint32 `json:"owner"` // an index into the TypeDef or MethodDef table, specifying the Type or Method to which this generic parameter applies; more precisely, a TypeOrMethodDef (§II.24.2.6) coded index Name uint32 
`json:"name"` // a non-null index into the String heap, giving the name for the generic parameter } // GenericParam 0x2a func (pe *File) parseMetadataGenericParamTable(off uint32) ([]GenericParamTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[GenericParam].CountCols) rows := make([]GenericParamTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Number, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if rows[i].Flags, err = pe.ReadUint16(off); err != nil { return rows, n, err } off += 2 n += 2 if indexSize, err = pe.readFromMetadataStream(idxTypeOrMethodDef, off, &rows[i].Owner); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // MethodSpec 0x2b type MethodSpecTableRow struct { Method uint32 `json:"method"` // an index into the MethodDef or MemberRef table, specifying to which generic method this row refers; that is, which generic method this row is an instantiation of; more precisely, a MethodDefOrRef (§II.24.2.6) coded index Instantiation uint32 `json:"instantiation"` // an index into the Blob heap } // MethodSpec 0x2b func (pe *File) parseMetadataMethodSpecTable(off uint32) ([]MethodSpecTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[MethodSpec].CountCols) rows := make([]MethodSpecTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxMethodDefOrRef, off, &rows[i].Method); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, &rows[i].Instantiation); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // GenericParamConstraint 0x2c type 
GenericParamConstraintTableRow struct { Owner uint32 `json:"owner"` // an index into the GenericParam table, specifying to which generic parameter this row refers Constraint uint32 `json:"constraint"` // an index into the TypeDef, TypeRef, or TypeSpec tables, specifying from which class this generic parameter is constrained to derive; or which interface this generic parameter is constrained to implement; more precisely, a TypeDefOrRef (§II.24.2.6) coded index } // GenericParamConstraint 0x2c func (pe *File) parseMetadataGenericParamConstraintTable(off uint32) ([]GenericParamConstraintTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[GenericParamConstraint].CountCols) rows := make([]GenericParamConstraintTableRow, rowCount) for i := 0; i < rowCount; i++ { if indexSize, err = pe.readFromMetadataStream(idxGenericParam, off, &rows[i].Owner); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxTypeDefOrRef, off, &rows[i].Constraint); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } // File 0x26 type FileTableRow struct { Flags uint32 `json:"flags"` // a 4-byte bitmask of type FileAttributes, §II.23.1.6 Name uint32 `json:"name"` // an index into the String heap HashValue uint32 `json:"hash_value"` // an index into the Blob heap } // File 0x26 func (pe *File) parseMetadataFileTable(off uint32) ([]FileTableRow, uint32, error) { var err error var indexSize uint32 var n uint32 rowCount := int(pe.CLR.MetadataTables[FileMD].CountCols) rows := make([]FileTableRow, rowCount) for i := 0; i < rowCount; i++ { if rows[i].Flags, err = pe.ReadUint32(off); err != nil { return rows, n, err } off += 4 n += 4 if indexSize, err = pe.readFromMetadataStream(idxString, off, &rows[i].Name); err != nil { return rows, n, err } off += indexSize n += indexSize if indexSize, err = pe.readFromMetadataStream(idxBlob, off, 
&rows[i].HashValue); err != nil { return rows, n, err } off += indexSize n += indexSize } return rows, n, nil } ================================================ FILE: dotnet_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "reflect" "sort" "strconv" "testing" ) func TestClrDirectoryHeaders(t *testing.T) { type TestClrHeaders struct { clrHeader ImageCOR20Header mdHeader MetadataHeader mdStreamHeaders []MetadataStreamHeader mdTablesStreamHeader MetadataTableStreamHeader } tests := []struct { in string out TestClrHeaders }{ { getAbsoluteFilePath("test/mscorlib.dll"), TestClrHeaders{ clrHeader: ImageCOR20Header{ Cb: 0x48, MajorRuntimeVersion: 0x2, MinorRuntimeVersion: 0x5, MetaData: ImageDataDirectory{ VirtualAddress: 0x2050, Size: 0xae34, }, Flags: 0x9, EntryPointRVAorToken: 0x0, StrongNameSignature: ImageDataDirectory{ VirtualAddress: 0xce84, Size: 0x80, }, }, mdHeader: MetadataHeader{ Signature: 0x424a5342, MajorVersion: 0x1, MinorVersion: 0x1, ExtraData: 0x0, VersionString: 0xc, Version: "v4.0.30319", Flags: 0x0, Streams: 0x5, }, mdStreamHeaders: []MetadataStreamHeader{ { Offset: 0x6c, Size: 0x4c38, Name: "#~", }, { Offset: 0x4ca4, Size: 0x5ed4, Name: "#Strings", }, { Offset: 0xab78, Size: 0x4, Name: "#US", }, { Offset: 0xab7c, Size: 0x10, Name: "#GUID", }, { Offset: 0xab8c, Size: 0x2a8, Name: "#Blob", }, }, mdTablesStreamHeader: MetadataTableStreamHeader{ Reserved: 0x0, MajorVersion: 0x2, MinorVersion: 0x0, Heaps: 0x0, RID: 0x1, MaskValid: 0x8900005407, Sorted: 0x16003301fa00, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 
switch file.Is64 { case true: oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryCLR] va = dirEntry.VirtualAddress size = dirEntry.Size case false: oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryCLR] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseCLRHeaderDirectory(va, size) if err != nil { t.Fatalf("parseCLRHeaderDirectory(%s) failed, reason: %v", tt.in, err) } clr := file.CLR if clr.CLRHeader != tt.out.clrHeader { t.Errorf("CLR header assertion failed, got %v, want %v", clr.CLRHeader, tt.out.clrHeader) } if clr.MetadataHeader != tt.out.mdHeader { t.Errorf("CLR metadata header assertion failed, got %v, want %v", clr.MetadataHeader, tt.out.mdHeader) } if !reflect.DeepEqual(clr.MetadataStreamHeaders, tt.out.mdStreamHeaders) { t.Errorf("CLR metadata stream headers assertion failed, got %v, want %v", clr.MetadataStreamHeaders, tt.out.mdStreamHeaders) } }) } } func TestClrDirectoryMetadataTables(t *testing.T) { type TestClrMetadataTable struct { tableKind int table MetadataTable } tests := []struct { in string out []TestClrMetadataTable }{ { getAbsoluteFilePath("test/mscorlib.dll"), []TestClrMetadataTable{ { tableKind: Module, table: MetadataTable{ Name: "Module", CountCols: 0x1, Content: []ModuleTableRow{ { Generation: 0x0, Name: 0x2cd7, Mvid: 0x1, EncID: 0x0, EncBaseID: 0x0, }, }, }, }, { tableKind: TypeRef, table: MetadataTable{ Name: "TypeRef", CountCols: 19, Content: []TypeRefTableRow{ { ResolutionScope: 0x6, TypeName: 0x22bd, TypeNamespace: 0x4d80, }, }, }, }, { tableKind: MemberRef, table: MetadataTable{ Name: "MemberRef", CountCols: 17, Content: []MemberRefTableRow{ { Class: 0x9, Name: 0x4c76, Signature: 0x1, }, }, }, }, { tableKind: CustomAttribute, table: MetadataTable{ Name: "CustomAttribute", CountCols: 19, Content: []CustomAttributeTableRow{ { Parent: 0x27, Type: 0x83, Value: 0x2a1, }, }, }, }, { tableKind: 
DeclSecurity, table: MetadataTable{ Name: "DeclSecurity", CountCols: 1, Content: []DeclSecurityTableRow{ { Action: 0x8, Parent: 0x6, PermissionSet: 0x52, }, }, }, }, { tableKind: Assembly, table: MetadataTable{ Name: "Assembly", CountCols: 1, Content: []AssemblyTableRow{ { HashAlgId: 0x8004, MajorVersion: 0x4, MinorVersion: 0x0, BuildNumber: 0x0, RevisionNumber: 0x0, Flags: 0x1, PublicKey: 0x41, Name: 0x704, Culture: 0x0, }, }, }, }, { tableKind: AssemblyRef, table: MetadataTable{ Name: "AssemblyRef", CountCols: 30, Content: []AssemblyRefTableRow{ { MajorVersion: 0x0, MinorVersion: 0x0, BuildNumber: 0x0, RevisionNumber: 0x0, Flags: 0x0, PublicKeyOrToken: 0x26, Name: 0x6ed, Culture: 0x0, HashValue: 0x0, }, }, }, }, { tableKind: ExportedType, table: MetadataTable{ Name: "ExportedType", CountCols: 1319, Content: []ExportedTypeTableRow{ { Flags: 0x200000, TypeDefId: 0x0, TypeName: 0x5d85, TypeNamespace: 0x316, Implementation: 0x9, }, }, }, }, }, }, { getAbsoluteFilePath("test/pspluginwkr.dll"), []TestClrMetadataTable{ { tableKind: Module, table: MetadataTable{ Name: "Module", CountCols: 0x1, Content: []ModuleTableRow{ { Generation: 0x0, Name: 0x8bdf, Mvid: 0x1, EncID: 0x0, EncBaseID: 0x0, }, }, }, }, { tableKind: TypeRef, table: MetadataTable{ Name: "TypeRef", CountCols: 140, Content: []TypeRefTableRow{ { ResolutionScope: 0x6, TypeName: 0x1103, TypeNamespace: 0x1113, }, }, }, }, { tableKind: TypeDef, table: MetadataTable{ Name: "TypeDef", CountCols: 169, Content: []TypeDefTableRow{ { Flags: 0x0, TypeName: 0x1, TypeNamespace: 0x0, Extends: 0x0, FieldList: 0x1, MethodList: 0x1, }, }, }, }, { tableKind: Field, table: MetadataTable{ Name: "Field", CountCols: 325, Content: []FieldTableRow{ { Flags: 0x113, Name: 0x4af1, Signature: 0xea9, }, }, }, }, { tableKind: MethodDef, table: MetadataTable{ Name: "MethodDef", CountCols: 434, Content: []MethodDefTableRow{ { RVA: 0x1d414, ImplFlags: 0x0, Flags: 0x13, Name: 0x1b7f, Signature: 0x125, ParamList: 0x1, }, }, }, }, { tableKind: 
Param, table: MetadataTable{ Name: "Param", CountCols: 679, Content: []ParamTableRow{ { Flags: 0x2000, Sequence: 0x0, Name: 0x0, }, }, }, }, { tableKind: InterfaceImpl, table: MetadataTable{ Name: "InterfaceImpl", CountCols: 3, Content: []InterfaceImplTableRow{ { Class: 0x6c, Interface: 0xa9, }, }, }, }, { tableKind: MemberRef, table: MetadataTable{ Name: "MemberRef", CountCols: 256, Content: []MemberRefTableRow{ { Class: 0x29, Name: 0x79f8, Signature: 0x11e2, }, }, }, }, { tableKind: Constant, table: MetadataTable{ Name: "Constant", CountCols: 2, Content: []ConstantTableRow{ { Type: 0xe, Parent: 0x464, Value: 0x1aa8, }, }, }, }, { tableKind: CustomAttribute, table: MetadataTable{ Name: "CustomAttribute", CountCols: 622, Content: []CustomAttributeTableRow{ { Parent: 0x2e, Type: 0x7db, Value: 0x2c02, }, }, }, }, { tableKind: FieldMarshal, table: MetadataTable{ Name: "FieldMarshal", CountCols: 33, Content: []FieldMarshalTableRow{ { Parent: 0x3, NativeType: 0x1ca6, }, }, }, }, { tableKind: DeclSecurity, table: MetadataTable{ Name: "DeclSecurity", CountCols: 4, Content: []DeclSecurityTableRow{ { Action: 0x8, Parent: 0x6, PermissionSet: 0x2d81, }, }, }, }, { tableKind: ClassLayout, table: MetadataTable{ Name: "ClassLayout", CountCols: 144, Content: []ClassLayoutTableRow{ { PackingSize: 0x0, ClassSize: 0x10, Parent: 0x2, }, }, }, }, { tableKind: StandAloneSig, table: MetadataTable{ Name: "StandAloneSig", CountCols: 358, Content: []StandAloneSigTableRow{ { Signature: 0x1caa, }, }, }, }, { tableKind: EventMap, table: MetadataTable{ Name: "EventMap", CountCols: 2, Content: []EventMapTableRow{ { Parent: 0x7f, EventList: 0x1, }, }, }, }, { tableKind: Event, table: MetadataTable{ Name: "Event", CountCols: 2, Content: []EventTableRow{ { EventFlags: 0x200, Name: 0x7eeb, EventType: 0x16, }, }, }, }, { tableKind: PropertyMap, table: MetadataTable{ Name: "PropertyMap", CountCols: 2, Content: []PropertyMapTableRow{ { Parent: 0x49, PropertyList: 0x1, }, }, }, }, { tableKind: 
Property, table: MetadataTable{ Name: "Property", CountCols: 2, Content: []PropertyTableRow{ { Flags: 0x0, Name: 0x7a8a, Type: 0x11d7, }, }, }, }, { tableKind: MethodSemantics, table: MetadataTable{ Name: "MethodSemantics", CountCols: 9, Content: []MethodSemanticsTableRow{ { Semantics: 0x10, Method: 0x153, Association: 0x2, }, }, }, }, { tableKind: ModuleRef, table: MetadataTable{ Name: "ModuleRef", CountCols: 1, Content: []ModuleRefTableRow{ { Name: 0x0, }, }, }, }, { tableKind: TypeSpec, table: MetadataTable{ Name: "TypeSpec", CountCols: 17, Content: []TypeSpecTableRow{ { Signature: 0x85, }, }, }, }, { tableKind: ImplMap, table: MetadataTable{ Name: "ImplMap", CountCols: 51, Content: []ImplMapTableRow{ { MappingFlags: 0x240, MemberForwarded: 0x1cb, ImportName: 0x0, ImportScope: 0x1, }, }, }, }, { tableKind: FieldRVA, table: MetadataTable{ Name: "FieldRVA", CountCols: 265, Content: []FieldRVATableRow{ { RVA: 0x11e4, Field: 0x1, }, }, }, }, { tableKind: Assembly, table: MetadataTable{ Name: "Assembly", CountCols: 1, Content: []AssemblyTableRow{ { HashAlgId: 0x8004, MajorVersion: 0x1, MinorVersion: 0x0, BuildNumber: 0x0, RevisionNumber: 0x0, Flags: 0x1, PublicKey: 0x2b03, Name: 0x8bd3, Culture: 0x0, }, }, }, }, { tableKind: AssemblyRef, table: MetadataTable{ Name: "AssemblyRef", CountCols: 5, Content: []AssemblyRefTableRow{ { MajorVersion: 0x2, MinorVersion: 0x0, BuildNumber: 0x0, RevisionNumber: 0x0, Flags: 0x0, PublicKeyOrToken: 0x1, Name: 0x10b9, Culture: 0x0, HashValue: 0xa, }, }, }, }, { tableKind: NestedClass, table: MetadataTable{ Name: "NestedClass", CountCols: 7, Content: []NestedClassTableRow{ { NestedClass: 0x7, EnclosingClass: 0x6, }, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 switch 
file.Is64 { case true: oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryCLR] va = dirEntry.VirtualAddress size = dirEntry.Size case false: oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryCLR] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseCLRHeaderDirectory(va, size) if err != nil { t.Fatalf("parseCLRHeaderDirectory(%s) failed, reason: %v", tt.in, err) } clr := file.CLR for _, tbl := range tt.out { mdTable := clr.MetadataTables[tbl.tableKind] if mdTable.CountCols != tbl.table.CountCols { t.Errorf("CLR metadata tables assertion failed on %s table, got %v, want %v", tbl.table.Name, mdTable.CountCols, tbl.table.CountCols) } if mdTable.Name != tbl.table.Name { t.Errorf("CLR metadata tables assertion failed on %s table, got %v, want %v", tbl.table.Name, mdTable.Name, tbl.table) } var got, want interface{} switch mdTable.Content.(type) { case []ModuleTableRow: got = mdTable.Content.([]ModuleTableRow)[0] want = tbl.table.Content.([]ModuleTableRow)[0] case []TypeRefTableRow: got = mdTable.Content.([]TypeRefTableRow)[0] want = tbl.table.Content.([]TypeRefTableRow)[0] case []TypeDefTableRow: got = mdTable.Content.([]TypeDefTableRow)[0] want = tbl.table.Content.([]TypeDefTableRow)[0] case []MemberRefTableRow: got = mdTable.Content.([]MemberRefTableRow)[0] want = tbl.table.Content.([]MemberRefTableRow)[0] case []CustomAttributeTableRow: got = mdTable.Content.([]CustomAttributeTableRow)[0] want = tbl.table.Content.([]CustomAttributeTableRow)[0] case []DeclSecurityTableRow: got = mdTable.Content.([]DeclSecurityTableRow)[0] want = tbl.table.Content.([]DeclSecurityTableRow)[0] case []AssemblyTableRow: got = mdTable.Content.([]AssemblyTableRow)[0] want = tbl.table.Content.([]AssemblyTableRow)[0] case []AssemblyRefTableRow: got = mdTable.Content.([]AssemblyRefTableRow)[0] want = tbl.table.Content.([]AssemblyRefTableRow)[0] case 
[]ExportedTypeTableRow: got = mdTable.Content.([]ExportedTypeTableRow)[0] want = tbl.table.Content.([]ExportedTypeTableRow)[0] case []FieldTableRow: got = mdTable.Content.([]FieldTableRow)[0] want = tbl.table.Content.([]FieldTableRow)[0] case []MethodDefTableRow: got = mdTable.Content.([]MethodDefTableRow)[0] want = tbl.table.Content.([]MethodDefTableRow)[0] case []ParamTableRow: got = mdTable.Content.([]ParamTableRow)[0] want = tbl.table.Content.([]ParamTableRow)[0] case []InterfaceImplTableRow: got = mdTable.Content.([]InterfaceImplTableRow)[0] want = tbl.table.Content.([]InterfaceImplTableRow)[0] case []ConstantTableRow: got = mdTable.Content.([]ConstantTableRow)[0] want = tbl.table.Content.([]ConstantTableRow)[0] case []FieldMarshalTableRow: got = mdTable.Content.([]FieldMarshalTableRow)[0] want = tbl.table.Content.([]FieldMarshalTableRow)[0] case []ClassLayoutTableRow: got = mdTable.Content.([]ClassLayoutTableRow)[0] want = tbl.table.Content.([]ClassLayoutTableRow)[0] case []StandAloneSigTableRow: got = mdTable.Content.([]StandAloneSigTableRow)[0] want = tbl.table.Content.([]StandAloneSigTableRow)[0] case []EventMapTableRow: got = mdTable.Content.([]EventMapTableRow)[0] want = tbl.table.Content.([]EventMapTableRow)[0] case []EventTableRow: got = mdTable.Content.([]EventTableRow)[0] want = tbl.table.Content.([]EventTableRow)[0] case []PropertyMapTableRow: got = mdTable.Content.([]PropertyMapTableRow)[0] want = tbl.table.Content.([]PropertyMapTableRow)[0] case []PropertyTableRow: got = mdTable.Content.([]PropertyTableRow)[0] want = tbl.table.Content.([]PropertyTableRow)[0] case []MethodSemanticsTableRow: got = mdTable.Content.([]MethodSemanticsTableRow)[0] want = tbl.table.Content.([]MethodSemanticsTableRow)[0] case []ModuleRefTableRow: got = mdTable.Content.([]ModuleRefTableRow)[0] want = tbl.table.Content.([]ModuleRefTableRow)[0] case []TypeSpecTableRow: got = mdTable.Content.([]TypeSpecTableRow)[0] want = tbl.table.Content.([]TypeSpecTableRow)[0] case 
[]ImplMapTableRow:
				got = mdTable.Content.([]ImplMapTableRow)[0]
				want = tbl.table.Content.([]ImplMapTableRow)[0]
			case []FieldRVATableRow:
				got = mdTable.Content.([]FieldRVATableRow)[0]
				want = tbl.table.Content.([]FieldRVATableRow)[0]
			case []NestedClassTableRow:
				got = mdTable.Content.([]NestedClassTableRow)[0]
				want = tbl.table.Content.([]NestedClassTableRow)[0]
			default:
				// Unknown table content type: force the DeepEqual below
				// to fail so the mismatch is reported.
				got = "bad type"
				want = "good type"
			}
			if !reflect.DeepEqual(got, want) {
				t.Errorf("CLR metadata tables assertion failed on %s table, got %v, want %v",
					tbl.table.Name, got, want)
			}
		}
	})
	}
}

func TestClrDirectorCOMImageFlagsType(t *testing.T) {
	tests := []struct {
		in  int
		out []string
	}{
		{
			0x9,
			[]string{"IL Only", "Strong Name Signed"},
		},
	}

	for _, tt := range tests {
		t.Run("CaseFlagsEqualTo_"+strconv.Itoa(tt.in), func(t *testing.T) {
			got := COMImageFlagsType(tt.in).String()
			// Sort both sides so the assertion is independent of the
			// order in which String() emits the flag names.
			sort.Strings(got)
			sort.Strings(tt.out)
			if !reflect.DeepEqual(got, tt.out) {
				t.Errorf("CLR header flags assertion failed, got %v, want %v",
					got, tt.out)
			}
		})
	}
}

func TestClrDirectoryMalformed(t *testing.T) {
	tests := []struct {
		name string
		in   string
	}{
		{
			name: "malformed_dotnet_binary",
			in:   getAbsoluteFilePath("test/04521ac8297cabd58e62e86039f6874a06753362dac4edc56bbf6d4655bb67bd"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			file, err := New(tt.in, &Options{})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			// Should not panic, and should continue parsing past the
			// malformed metadata stream.
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}
			// Verify that streams with out-of-bounds sizes get
			// an empty byte slice instead of causing a panic.
			for _, name := range []string{"#Strings", "#US", "#GUID", "#Blob"} {
				data, ok := file.CLR.MetadataStreams[name]
				if !ok {
					t.Errorf("expected stream %q to be present", name)
				}
				if len(data) != 0 {
					t.Errorf("expected stream %q to be empty, got len=%d", name, len(data))
				}
			}
		})
	}
}

================================================
FILE: exception.go
================================================

// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

import (
	"encoding/binary"
	"strconv"
)

const (
	// Unwind information flags.

	// UnwFlagNHandler - The function has no handler.
	UnwFlagNHandler = uint8(0x0)

	// UnwFlagEHandler - The function has an exception handler that should
	// be called when looking for functions that need to examine exceptions.
	UnwFlagEHandler = uint8(0x1)

	// UnwFlagUHandler - The function has a termination handler that should
	// be called when unwinding an exception.
	UnwFlagUHandler = uint8(0x2)

	// UnwFlagChainInfo - This unwind info structure is not the primary one
	// for the procedure. Instead, the chained unwind info entry is the contents
	// of a previous RUNTIME_FUNCTION entry. For information, see Chained unwind
	// info structures. If this flag is set, then the UNW_FLAG_EHANDLER and
	// UNW_FLAG_UHANDLER flags must be cleared. Also, the frame register and
	// fixed-stack allocation field must have the same values as in the primary
	// unwind info.
	UnwFlagChainInfo = uint8(0x4)
)

// The meaning of the operation info bits depends upon the operation code.
// To encode a general-purpose (integer) register, this mapping is used:
const (
	rax = iota
	rcx
	rdx
	rbx
	rsp
	rbp
	rsi
	rdi
	r8
	r9
	r10
	r11
	r12
	r13
	r14
	r15
)

// OpInfoRegisters maps registers to string.
var OpInfoRegisters = map[uint8]string{
	rax: "RAX",
	rcx: "RCX",
	rdx: "RDX",
	rbx: "RBX",
	rsp: "RSP",
	rbp: "RBP",
	rsi: "RSI",
	rdi: "RDI",
	r8:  "R8",
	r9:  "R9",
	r10: "R10",
	r11: "R11",
	r12: "R12",
	r13: "R13",
	r14: "R14",
	r15: "R15",
}

// UnwindOpType represents the type of an unwind opcode.
type UnwindOpType uint8

// _UNWIND_OP_CODES
const (
	// UwOpPushNonVol - Push a nonvolatile integer register, decrementing RSP
	// by 8. The operation info is the number of the register. Because of the
	// constraints on epilogs, UWOP_PUSH_NONVOL unwind codes must appear first
	// in the prolog and correspondingly, last in the unwind code array. This
	// relative ordering applies to all other unwind codes except
	// UWOP_PUSH_MACHFRAME.
	UwOpPushNonVol = UnwindOpType(0)

	// UwOpAllocLarge - Allocate a large-sized area on the stack. There are two
	// forms. If the operation info equals 0, then the size of the allocation
	// divided by 8 is recorded in the next slot, allowing an allocation up to
	// 512K - 8. If the operation info equals 1, then the unscaled size of the
	// allocation is recorded in the next two slots in little-endian format,
	// allowing allocations up to 4GB - 8.
	UwOpAllocLarge = UnwindOpType(1)

	// UwOpAllocSmall - Allocate a small-sized area on the stack. The size of
	// the allocation is the operation info field * 8 + 8, allowing allocations
	// from 8 to 128 bytes.
	UwOpAllocSmall = UnwindOpType(2)

	// UwOpSetFpReg - Establish the frame pointer register by setting the
	// register to some offset of the current RSP. The offset is equal to the
	// Frame Register offset (scaled) field in the UNWIND_INFO * 16, allowing
	// offsets from 0 to 240. The use of an offset permits establishing a frame
	// pointer that points to the middle of the fixed stack allocation, helping
	// code density by allowing more accesses to use short instruction forms.
	// The operation info field is reserved and shouldn't be used.
	UwOpSetFpReg = UnwindOpType(3)

	// UwOpSaveNonVol - Save a nonvolatile integer register on the stack using
	// a MOV instead of a PUSH. This code is primarily used for shrink-wrapping,
	// where a nonvolatile register is saved to the stack in a position that was
	// previously allocated. The operation info is the number of the register.
	// The scaled-by-8 stack offset is recorded in the next unwind operation
	// code slot, as described in the note above.
	UwOpSaveNonVol = UnwindOpType(4)

	// UwOpSaveNonVolFar - Save a nonvolatile integer register on the stack
	// with a long offset, using a MOV instead of a PUSH. This code is
	// primarily used for shrink-wrapping, where a nonvolatile register is
	// saved to the stack in a position that was previously allocated. The
	// operation info is the number of the register. The unscaled stack offset
	// is recorded in the next two unwind operation code slots, as described in
	// the note above.
	UwOpSaveNonVolFar = UnwindOpType(5)

	// UwOpEpilog - For version 1 of the UNWIND_INFO structure, this code was
	// called UWOP_SAVE_XMM and occupied 2 records, it retained the lower 64
	// bits of the XMM register, but was later removed and is now skipped. In
	// practice, this code has never been used.
	// For version 2 of the UNWIND_INFO structure, this code is called
	// UWOP_EPILOG, takes 2 entries, and describes the function epilogue.
	UwOpEpilog = UnwindOpType(6)

	// UwOpSpareCode - For version 1 of the UNWIND_INFO structure, this code
	// was called UWOP_SAVE_XMM_FAR and occupied 3 records, it saved the lower
	// 64 bits of the XMM register, but was later removed and is now skipped.
	// In practice, this code has never been used.
	// For version 2 of the UNWIND_INFO structure, this code is called
	// UWOP_SPARE_CODE, takes 3 entries, and makes no sense.
	UwOpSpareCode = UnwindOpType(7)

	// UwOpSaveXmm128 - Save all 128 bits of a nonvolatile XMM register on the
	// stack. The operation info is the number of the register. The
	// scaled-by-16 stack offset is recorded in the next slot.
	UwOpSaveXmm128 = UnwindOpType(8)

	// UwOpSaveXmm128Far - Save all 128 bits of a nonvolatile XMM register on
	// the stack with a long offset. The operation info is the number of the
	// register. The unscaled stack offset is recorded in the next two slots.
	UwOpSaveXmm128Far = UnwindOpType(9)

	// UwOpPushMachFrame - Push a machine frame. This unwind code is used to
	// record the effect of a hardware interrupt or exception.
	UwOpPushMachFrame = UnwindOpType(10)

	// UwOpSetFpRegLarge - UWOP_SET_FPREG_LARGE is a CLR Unix-only extension
	// to the Windows AMD64 unwind codes. It is not part of the standard
	// Windows AMD64 unwind codes specification. UWOP_SET_FPREG allows for a
	// maximum of a 240 byte offset between RSP and the frame pointer, when
	// the frame pointer is established. UWOP_SET_FPREG_LARGE has a 32-bit
	// range scaled by 16. When UWOP_SET_FPREG_LARGE is used,
	// UNWIND_INFO.FrameRegister must be set to the frame pointer register,
	// and UNWIND_INFO.FrameOffset must be set to 15 (its maximum value).
	// UWOP_SET_FPREG_LARGE is followed by two UNWIND_CODEs that are combined
	// to form a 32-bit offset (the same as UWOP_SAVE_NONVOL_FAR). This offset
	// is then scaled by 16. The result must be less than 2^32 (that is, the
	// top 4 bits of the unscaled 32-bit number must be zero). This result is
	// used as the frame pointer register offset from RSP at the time the
	// frame pointer is established. Either UWOP_SET_FPREG or
	// UWOP_SET_FPREG_LARGE can be used, but not both.
	UwOpSetFpRegLarge = UnwindOpType(11)
)

// ImageRuntimeFunctionEntry represents an entry in the function table on 64-bit
// Windows (IMAGE_RUNTIME_FUNCTION_ENTRY). Table-based exception handling
// requires a table entry for all functions that allocate stack space or call
// another function (for example, non-leaf functions).
type ImageRuntimeFunctionEntry struct {
	// The address of the start of the function.
	BeginAddress uint32 `json:"begin_address"`

	// The address of the end of the function.
	EndAddress uint32 `json:"end_address"`

	// The unwind data info structure is used to record the effects a function
	// has on the stack pointer, and where the nonvolatile registers are saved
	// on the stack.
	UnwindInfoAddress uint32 `json:"unwind_info_address"`
}

// ImageARMRuntimeFunctionEntry represents the function table entry for the ARM
// platform.
type ImageARMRuntimeFunctionEntry struct {
	// Function Start RVA is the 32-bit RVA of the start of the function. If
	// the function contains thumb code, the low bit of this address must be set.
	BeginAddress uint32 `bitfield:",functionstart" json:"begin_address"`

	// Flag is a 2-bit field that indicates how to interpret the remaining
	// 30 bits of the second .pdata word. If Flag is 0, then the remaining bits
	// form an Exception Information RVA (with the low two bits implicitly 0).
	// If Flag is non-zero, then the remaining bits form a Packed Unwind Data
	// structure.
	Flag uint8 `json:"flag"`

	/* Exception Information RVA or Packed Unwind Data.

	Exception Information RVA is the address of the variable-length exception
	information structure, stored in the .xdata section.
	This data must be 4-byte aligned.

	Packed Unwind Data is a compressed description of the operations required
	to unwind from a function, assuming a canonical form. In this case, no
	.xdata record is required. */
	ExceptionFlag uint32 `json:"exception_flag"`
}

// UnwindCode is used to record the sequence of operations in the prolog that
// affect the nonvolatile registers and RSP. Each code item has this format:
/* typedef union _UNWIND_CODE {
	struct {
		UCHAR CodeOffset;
		UCHAR UnwindOp : 4;
		UCHAR OpInfo : 4;
	} DUMMYUNIONNAME;

	struct {
		UCHAR OffsetLow;
		UCHAR UnwindOp : 4;
		UCHAR OffsetHigh : 4;
	} EpilogueCode;

	USHORT FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;*/
//
// It provides information about the amount of stack space allocated, the location
// of saved non-volatile registers, and whether or not a frame register is used
// and what relation it has to the rest of the stack.
type UnwindCode struct {
	// Offset (from the beginning of the prolog) of the end of the instruction
	// that performs this operation, plus 1 (that is, the offset of the start
	// of the next instruction).
	CodeOffset uint8 `json:"code_offset"`

	// The unwind operation code.
	UnwindOp UnwindOpType `json:"unwind_op"`

	// Operation info.
	OpInfo uint8 `json:"op_info"`

	// Allocation size.
	Operand string `json:"operand"`

	// Stack offset recorded by the save-style opcodes.
	FrameOffset uint16 `json:"frame_offset"`
}

// UnwindInfo represents the _UNWIND_INFO structure. It is used to record the
// effects a function has on the stack pointer, and where the nonvolatile
// registers are saved on the stack.
type UnwindInfo struct {
	// (3 bits) Version number of the unwind data, currently 1 and 2.
	Version uint8 `json:"version"`

	// (5 bits) Three flags are currently defined above.
	Flags uint8 `json:"flags"`

	// Length of the function prolog in bytes.
	SizeOfProlog uint8 `json:"size_of_prolog"`

	// The number of slots in the unwind codes array. Some unwind codes,
	// for example, UWOP_SAVE_NONVOL, require more than one slot in the array.
	CountOfCodes uint8 `json:"count_of_codes"`

	// If nonzero, then the function uses a frame pointer (FP), and this field
	// is the number of the nonvolatile register used as the frame pointer,
	// using the same encoding for the operation info field of UNWIND_CODE nodes.
	FrameRegister uint8 `json:"frame_register"`

	// If the frame register field is nonzero, this field is the scaled offset
	// from RSP that is applied to the FP register when it's established. The
	// actual FP register is set to RSP + 16 * this number, allowing offsets
	// from 0 to 240. This offset permits pointing the FP register into the
	// middle of the local stack allocation for dynamic stack frames, allowing
	// better code density through shorter instructions. (That is, more
	// instructions can use the 8-bit signed offset form.)
	FrameOffset uint8 `json:"frame_offset"`

	// An array of items that explains the effect of the prolog on the
	// nonvolatile registers and RSP. See the section on UNWIND_CODE for the
	// meanings of individual items. For alignment purposes, this array always
	// has an even number of entries, and the final entry is potentially
	// unused. In that case, the array is one longer than indicated by the
	// count of unwind codes field.
	UnwindCodes []UnwindCode `json:"unwind_codes"`

	// Address of exception handler when UNW_FLAG_EHANDLER is set.
	ExceptionHandler uint32 `json:"exception_handler"`

	// If flag UNW_FLAG_CHAININFO is set, then the UNWIND_INFO structure ends
	// with three UWORDs. These UWORDs represent the RUNTIME_FUNCTION
	// information for the function of the chained unwind.
	FunctionEntry ImageRuntimeFunctionEntry `json:"function_entry"`
}

//
// The unwind codes are followed by an optional DWORD aligned field that
// contains the exception handler address or the address of chained unwind
// information. If an exception handler address is specified, then it is
// followed by the language specified exception handler data.
//
//  union {
//      ULONG ExceptionHandler;
//      ULONG FunctionEntry;
//  };
//
//  ULONG ExceptionData[];
//

// ScopeRecord describes one __try/__except block within a function.
type ScopeRecord struct {
	// This value indicates the offset of the first instruction within a __try
	// block located in the function.
	BeginAddress uint32 `json:"begin_address"`

	// This value indicates the offset to the instruction after the last
	// instruction within the __try block (conceptually the __except statement).
	EndAddress uint32 `json:"end_address"`

	// This value indicates the offset to the function located within the
	// parentheses of the __except() statement. In the documentation you'll
	// find this routine called the "exception handler" or "exception filter".
	HandlerAddress uint32 `json:"handler_address"`

	// This value indicates the offset to the first instruction in the __except
	// block associated with the __try block.
JumpTarget uint32 `json:"jump_target"` } // ScopeTable represents a variable length structure containing a count followed // by Count "scope records". While the RUNTIME_FUNCTION describes the entire range // of a function that contains SEH, the SCOPE_TABLE describes each of the individual // __try/__except blocks within the function. type ScopeTable struct { // The count of scope records. Count uint32 `json:"count"` // A array of scope record. ScopeRecords []ScopeRecord `json:"scope_records"` } //  typedef struct _SCOPE_TABLE { // ULONG Count; // struct // { // ULONG BeginAddress; // ULONG EndAddress; // ULONG HandlerAddress; // ULONG JumpTarget; // } ScopeRecord[1]; //  } SCOPE_TABLE, *PSCOPE_TABLE; // Exception represent an entry in the function table. type Exception struct { RuntimeFunction ImageRuntimeFunctionEntry `json:"runtime_function"` UnwindInfo UnwindInfo `json:"unwind_info"` } func (pe *File) parseUnwindCode(offset uint32, version uint8) (UnwindCode, int) { unwindCode := UnwindCode{} advanceBy := 0 // Read the unwind code at offset (2 bytes) uc, err := pe.ReadUint16(offset) if err != nil { return unwindCode, advanceBy } unwindCode.CodeOffset = uint8(uc & 0xff) unwindCode.UnwindOp = UnwindOpType(uc & 0xf00 >> 8) unwindCode.OpInfo = uint8(uc & 0xf000 >> 12) switch unwindCode.UnwindOp { case UwOpAllocSmall: size := int(unwindCode.OpInfo*8 + 8) unwindCode.Operand = "Size=" + strconv.Itoa(size) advanceBy++ case UwOpAllocLarge: if unwindCode.OpInfo == 0 { size := int(binary.LittleEndian.Uint16(pe.data[offset+2:]) * 8) unwindCode.Operand = "Size=" + strconv.Itoa(size) advanceBy += 2 } else { size := int(binary.LittleEndian.Uint32(pe.data[offset+2:]) << 16) unwindCode.Operand = "Size=" + strconv.Itoa(size) advanceBy += 3 } case UwOpSetFpReg: unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] advanceBy++ case UwOpPushNonVol: unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] advanceBy++ case UwOpSaveNonVol: fo := 
binary.LittleEndian.Uint16(pe.data[offset+2:]) unwindCode.FrameOffset = fo * 8 unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] + ", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset)) advanceBy += 2 case UwOpSaveNonVolFar: fo := binary.LittleEndian.Uint32(pe.data[offset+2:]) unwindCode.FrameOffset = uint16(fo * 8) unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] + ", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset)) advanceBy += 3 case UwOpSaveXmm128: fo := binary.LittleEndian.Uint16(pe.data[offset+2:]) unwindCode.FrameOffset = fo * 16 unwindCode.Operand = "Register=XMM" + strconv.Itoa(int(unwindCode.OpInfo)) + ", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset)) advanceBy += 2 case UwOpSaveXmm128Far: fo := binary.LittleEndian.Uint32(pe.data[offset+2:]) unwindCode.FrameOffset = uint16(fo) unwindCode.Operand = "Register=XMM" + strconv.Itoa(int(unwindCode.OpInfo)) + ", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset)) advanceBy += 3 case UwOpSetFpRegLarge: unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] advanceBy += 2 case UwOpPushMachFrame: advanceBy++ case UwOpEpilog: if version == 2 { unwindCode.Operand = "Flags=" + strconv.Itoa(int(unwindCode.OpInfo)) + ", Size=" + strconv.Itoa(int(unwindCode.CodeOffset)) } advanceBy += 2 case UwOpSpareCode: advanceBy += 3 default: advanceBy++ // so we can get out of the loop pe.logger.Warnf("Wrong unwind opcode %d", unwindCode.UnwindOp) } return unwindCode, advanceBy } func (pe *File) parseUnwindInfo(unwindInfo uint32) UnwindInfo { ui := UnwindInfo{} offset := pe.GetOffsetFromRva(unwindInfo) v, err := pe.ReadUint32(offset) if err != nil { return ui } // The lowest 3 bits ui.Version = uint8(v & 0x7) // The next 5 bits. ui.Flags = uint8(v & 0xf8 >> 3) // The next byte ui.SizeOfProlog = uint8(v & 0xff00 >> 8) // The next byte ui.CountOfCodes = uint8(v & 0xff0000 >> 16) // The next 4 bits ui.FrameRegister = uint8(v & 0xf00000 >> 24) // The next 4 bits. 
ui.FrameOffset = uint8(v&0xf0000000>>28) * 6 // Each unwind code struct is 2 bytes wide. offset += 4 i := 0 for i < int(ui.CountOfCodes) { ucOffset := offset + 2*uint32(i) unwindCode, advanceBy := pe.parseUnwindCode(ucOffset, ui.Version) if advanceBy == 0 { return ui } ui.UnwindCodes = append(ui.UnwindCodes, unwindCode) i += advanceBy } if ui.CountOfCodes&1 == 1 { offset += 2 } // An image-relative pointer to either the function's language-specific // exception or termination handler, if flag UNW_FLAG_CHAININFO is clear // and one of the flags UNW_FLAG_EHADLER or UNW_FLAG_UHANDLER is set. if ui.Flags&UnwFlagEHandler != 0 || ui.Flags&UnwFlagUHandler != 0 { if ui.Flags&UnwFlagChainInfo == 0 { handlerOffset := offset + 2*uint32(i) ui.ExceptionHandler = binary.LittleEndian.Uint32(pe.data[handlerOffset:]) } } // If the UNW_FLAG_CHAININFO flag is set, then an unwind info structure // is a secondary one, and the shared exception-handler/chained-info // address field contains the primary unwind information. This sample // code retrieves the primary unwind information, assuming that unwindInfo // is the structure that has the UNW_FLAG_CHAININFO flag set. if ui.Flags&UnwFlagChainInfo != 0 { chainOffset := offset + 2*uint32(i) rf := ImageRuntimeFunctionEntry{} size := uint32(binary.Size(ImageRuntimeFunctionEntry{})) err := pe.structUnpack(&rf, chainOffset, size) if err != nil { return ui } ui.FunctionEntry = rf } return ui } // Exception directory contains an array of function table entries that are used // for exception handling. func (pe *File) parseExceptionDirectory(rva, size uint32) error { // The target platform determines which format of the function table entry // to use. 
var exceptions []Exception fileOffset := pe.GetOffsetFromRva(rva) entrySize := uint32(binary.Size(ImageRuntimeFunctionEntry{})) entriesCount := size / entrySize for i := uint32(0); i < entriesCount; i++ { functionEntry := ImageRuntimeFunctionEntry{} offset := fileOffset + (entrySize * i) err := pe.structUnpack(&functionEntry, offset, entrySize) if err != nil { return err } exception := Exception{RuntimeFunction: functionEntry} if pe.Is64 { exception.UnwindInfo = pe.parseUnwindInfo(functionEntry.UnwindInfoAddress) } exceptions = append(exceptions, exception) } pe.Exceptions = exceptions if len(exceptions) > 0 { pe.HasException = true } return nil } // PrettyUnwindInfoHandlerFlags returns the string representation of the // `flags` field of the unwind info structure. func PrettyUnwindInfoHandlerFlags(flags uint8) []string { var values []string unwFlagHandlerMap := map[uint8]string{ UnwFlagNHandler: "No Handler", UnwFlagEHandler: "Exception", UnwFlagUHandler: "Termination", UnwFlagChainInfo: "Chain", } for k, s := range unwFlagHandlerMap { if k&flags != 0 { values = append(values, s) } } return values } // String returns the string representation of the an unwind opcode. func (uo UnwindOpType) String() string { unOpToString := map[UnwindOpType]string{ UwOpPushNonVol: "UWOP_PUSH_NONVOL", UwOpAllocLarge: "UWOP_ALLOC_LARE", UwOpAllocSmall: "UWOP_ALLOC_SMALL", UwOpSetFpReg: "UWOP_SET_FPREG", UwOpSaveNonVol: "UWOP_SAVE_NONVOL", UwOpSaveNonVolFar: "UWOP_SAVE_NONVOL_FAR", UwOpEpilog: "UWOP_EPILOG", UwOpSpareCode: "UWOP_SPARE_CODE", UwOpSaveXmm128: "UWOP_SAVE_XMM128", UwOpSaveXmm128Far: "UWOP_SAVE_XMM128_FAR", UwOpPushMachFrame: "UWOP_PUSH_MACHFRAME", UwOpSetFpRegLarge: "UWOP_SET_FPREG_LARGE", } if val, ok := unOpToString[uo]; ok { return val } return "?" } ================================================ FILE: exception_test.go ================================================ // Copyright 2021 Saferwall. All rights reserved. 
// Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "reflect" "strconv" "testing" ) type TestExceptionEntry struct { entryCount int entryIndex int runtimeFunc ImageRuntimeFunctionEntry unwindInfo UnwindInfo } func TestParseExceptionDirectory(t *testing.T) { tests := []struct { in string out TestExceptionEntry }{ { getAbsoluteFilePath("test/kernel32.dll"), TestExceptionEntry{ entryCount: 1835, entryIndex: 0, runtimeFunc: ImageRuntimeFunctionEntry{ BeginAddress: 0x1010, EndAddress: 0x1053, UnwindInfoAddress: 0x938b8, }, unwindInfo: UnwindInfo{ Version: 0x1, Flags: 0x0, SizeOfProlog: 0x7, CountOfCodes: 0x1, FrameRegister: 0x0, FrameOffset: 0x0, UnwindCodes: []UnwindCode{ { CodeOffset: 0x07, UnwindOp: 0x2, OpInfo: 0x8, Operand: "Size=72", FrameOffset: 0x0, }, }, }, }, }, { // fake exception directory getAbsoluteFilePath("test/0585495341e0ffaae1734acb78708ff55cd3612d844672d37226ef63d12652d0"), TestExceptionEntry{ entryCount: 3349, entryIndex: 0, runtimeFunc: ImageRuntimeFunctionEntry{ BeginAddress: 0xf860617, EndAddress: 0x205fef60, UnwindInfoAddress: 0x2c0365b4, }, unwindInfo: UnwindInfo{}, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 switch file.Is64 { case true: oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryException] va = dirEntry.VirtualAddress size = dirEntry.Size case false: oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryException] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseExceptionDirectory(va, size) if err != nil { t.Fatalf("parseExceptionDirectory(%s) failed, reason: %v", 
tt.in, err) } got := file.Exceptions if len(got) != tt.out.entryCount { t.Errorf("Exception entry count assertion failed, got %v, want %v", len(got), tt.out.entryCount) } runtimeFunc := file.Exceptions[tt.out.entryIndex].RuntimeFunction if runtimeFunc != tt.out.runtimeFunc { t.Errorf("RuntimeFunction assertion failed, got %v, want %v", len(got), tt.out.entryCount) } unwindInfo := file.Exceptions[tt.out.entryIndex].UnwindInfo if !reflect.DeepEqual(unwindInfo, tt.out.unwindInfo) { t.Errorf("UnwindInfo assertion failed, got %v, want %v", unwindInfo, tt.out.unwindInfo) } }) } } func TestExceptionDirectoryUnwindOpcode(t *testing.T) { tests := []struct { in UnwindOpType out string }{ { UwOpPushNonVol, "UWOP_PUSH_NONVOL", }, { UnwindOpType(0xff), "?", }, } for _, tt := range tests { name := "CaseUnwindOpcodeEqualTo_" + strconv.Itoa(int(tt.in)) t.Run(name, func(t *testing.T) { got := tt.in.String() if got != tt.out { t.Errorf("unwind opcode string interpretation, got %v, want %v", got, tt.out) } }) } } ================================================ FILE: exports.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" "errors" "fmt" ) const ( maxExportedSymbols = 0x2000 ) var ( ErrExportMaxOrdEntries = "Export directory contains more than max ordinal entries" ErrExportManyRepeatedEntries = "Export directory contains many repeated entries" AnoNullNumberOfFunctions = "Export directory contains zero number of functions" AnoNullAddressOfFunctions = "Export directory contains zero address of functions" ) // ImageExportDirectory represents the IMAGE_EXPORT_DIRECTORY structure. // The export directory table contains address information that is used // to resolve imports to the entry points within this image. type ImageExportDirectory struct { // Reserved, must be 0. 
Characteristics uint32 `json:"characteristics"` // The time and date that the export data was created. TimeDateStamp uint32 `json:"time_date_stamp"` // The major version number. //The major and minor version numbers can be set by the user. MajorVersion uint16 `json:"major_version"` // The minor version number. MinorVersion uint16 `json:"minor_version"` // The address of the ASCII string that contains the name of the DLL. // This address is relative to the image base. Name uint32 `json:"name"` // The starting ordinal number for exports in this image. This field // specifies the starting ordinal number for the export address table. // It is usually set to 1. Base uint32 `json:"base"` // The number of entries in the export address table. NumberOfFunctions uint32 `json:"number_of_functions"` // The number of entries in the name pointer table. This is also the number // of entries in the ordinal table. NumberOfNames uint32 `json:"number_of_names"` // The address of the export address table, relative to the image base. AddressOfFunctions uint32 `json:"address_of_functions"` // The address of the export name pointer table, relative to the image base. // The table size is given by the Number of Name Pointers field. AddressOfNames uint32 `json:"address_of_names"` // The address of the ordinal table, relative to the image base. AddressOfNameOrdinals uint32 `json:"address_of_name_ordinals"` } // ExportFunction represents an imported function in the export table. type ExportFunction struct { Ordinal uint32 `json:"ordinal"` FunctionRVA uint32 `json:"function_rva"` NameOrdinal uint32 `json:"name_ordinal"` NameRVA uint32 `json:"name_rva"` Name string `json:"name"` Forwarder string `json:"forwarder"` ForwarderRVA uint32 `json:"forwarder_rva"` } // Export represent the export table. 
type Export struct { Functions []ExportFunction `json:"functions"` Struct ImageExportDirectory `json:"struct"` Name string `json:"name"` } /* A few notes learned from `Corkami` about parsing export directory: - like many data directories, Exports' size are not necessary, except for forwarding. - Characteristics, TimeDateStamp, MajorVersion and MinorVersion are not necessary. - the export name is not necessary, and can be anything. - AddressOfNames is lexicographically-ordered. - export names can have any value (even null or more than 65536 characters long, with unprintable characters), just null terminated. - an EXE can have exports (no need of relocation nor DLL flag), and can use them normally - exports can be not used for execution, but for documenting the internal code - numbers of functions will be different from number of names when the file is exporting some functions by ordinal. */ func (pe *File) parseExportDirectory(rva, size uint32) error { // Define some vars. exp := Export{} exportDir := ImageExportDirectory{} errorMsg := fmt.Sprintf("Error parsing export directory at RVA: 0x%x", rva) fileOffset := pe.GetOffsetFromRva(rva) exportDirSize := uint32(binary.Size(exportDir)) err := pe.structUnpack(&exportDir, fileOffset, exportDirSize) if err != nil { return errors.New(errorMsg) } exp.Struct = exportDir // We keep track of the bytes left in the file and use it to set a upper // bound in the number of items that can be read from the different arrays. lengthUntilEOF := func(rva uint32) uint32 { return pe.size - pe.GetOffsetFromRva(rva) } var length uint32 var addressOfNames []byte // Some DLLs have null number of functions. if exportDir.NumberOfFunctions == 0 { pe.Anomalies = append(pe.Anomalies, AnoNullNumberOfFunctions) } // Some DLLs have null address of functions. 
if exportDir.AddressOfFunctions == 0 { pe.Anomalies = append(pe.Anomalies, AnoNullAddressOfFunctions) } length = min(lengthUntilEOF(exportDir.AddressOfNames), exportDir.NumberOfNames*4) addressOfNames, err = pe.GetData(exportDir.AddressOfNames, length) if err != nil { return errors.New(errorMsg) } length = min(lengthUntilEOF(exportDir.AddressOfNameOrdinals), exportDir.NumberOfNames*4) addressOfNameOrdinals, err := pe.GetData(exportDir.AddressOfNameOrdinals, length) if err != nil { return errors.New(errorMsg) } length = min(lengthUntilEOF(exportDir.AddressOfFunctions), exportDir.NumberOfFunctions*4) addressOfFunctions, err := pe.GetData(exportDir.AddressOfFunctions, length) if err != nil { return errors.New(errorMsg) } exp.Name = pe.getStringAtRVA(exportDir.Name, 0x100000) maxFailedEntries := 10 var forwarderStr string var forwarderOffset uint32 safetyBoundary := pe.size // overly generous upper bound symbolCounts := make(map[uint32]int) parsingFailed := false // read the image export directory section := pe.getSectionByRva(exportDir.AddressOfNames) if section != nil { safetyBoundary = (section.Header.VirtualAddress + uint32(len(section.Data(0, 0, pe)))) - exportDir.AddressOfNames } numNames := min(exportDir.NumberOfNames, safetyBoundary/4) var symbolAddress uint32 for i := uint32(0); i < numNames; i++ { defer func() { // recover from panic if one occured. Set err to nil otherwise. if recover() != nil { err = errors.New("array index out of bounds") } }() symbolOrdinal := binary.LittleEndian.Uint16(addressOfNameOrdinals[i*2:]) symbolAddress = binary.LittleEndian.Uint32(addressOfFunctions[symbolOrdinal*4:]) if symbolAddress == 0 { continue } // If the function's RVA points within the export directory // it will point to a string with the forwarded symbol's string // instead of pointing the the function start address. 
if symbolAddress >= rva && symbolAddress < rva+size { forwarderStr = pe.getStringAtRVA(symbolAddress, 0x100000) forwarderOffset = pe.GetOffsetFromRva(symbolAddress) } else { forwarderStr = "" fileOffset = 0 } symbolNameAddress := binary.LittleEndian.Uint32(addressOfNames[i*4:]) if symbolNameAddress == 0 { maxFailedEntries-- if maxFailedEntries <= 0 { parsingFailed = true break } } symbolName := pe.getStringAtRVA(symbolNameAddress, 0x100000) if !IsValidFunctionName(symbolName) { parsingFailed = true break } symbolNameOffset := pe.GetOffsetFromRva(symbolNameAddress) if symbolNameOffset == 0 { maxFailedEntries-- if maxFailedEntries <= 0 { parsingFailed = true break } } // File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1 // was being parsed as potentially containing millions of exports. // Checking for duplicates addresses the issue. symbolCounts[symbolAddress]++ if symbolCounts[symbolAddress] > 10 { if !stringInSlice(ErrExportManyRepeatedEntries, pe.Anomalies) { pe.Anomalies = append(pe.Anomalies, ErrExportManyRepeatedEntries) } } if len(symbolCounts) > maxExportedSymbols { if !stringInSlice(ErrExportMaxOrdEntries, pe.Anomalies) { pe.Anomalies = append(pe.Anomalies, ErrExportMaxOrdEntries) } } newExport := ExportFunction{ Name: symbolName, NameRVA: symbolNameAddress, NameOrdinal: uint32(symbolOrdinal), Ordinal: exportDir.Base + uint32(symbolOrdinal), FunctionRVA: symbolAddress, Forwarder: forwarderStr, ForwarderRVA: forwarderOffset, } exp.Functions = append(exp.Functions, newExport) } if parsingFailed { fmt.Printf("RVA AddressOfNames in the export directory points to an "+ "invalid address: 0x%x\n", exportDir.AddressOfNames) } maxFailedEntries = 10 section = pe.getSectionByRva(exportDir.AddressOfFunctions) // Overly generous upper bound safetyBoundary = pe.size if section != nil { safetyBoundary = section.Header.VirtualAddress + uint32(len(section.Data(0, 0, pe))) - exportDir.AddressOfNames } parsingFailed = false ordinals := make(map[uint32]bool) 
for _, export := range exp.Functions { ordinals[export.Ordinal] = true } numNames = min(exportDir.NumberOfFunctions, safetyBoundary/4) for i := uint32(0); i < numNames; i++ { value := i + exportDir.Base if ordinals[value] { continue } if len(addressOfFunctions) >= int(i*4)+4 { symbolAddress = binary.LittleEndian.Uint32(addressOfFunctions[i*4:]) } if symbolAddress == 0 { continue } // Checking for forwarder again. if symbolAddress >= rva && symbolAddress < rva+size { forwarderStr = pe.getStringAtRVA(symbolAddress, 0x100000) forwarderOffset = pe.GetOffsetFromRva(symbolAddress) } else { forwarderStr = "" fileOffset = 0 } // File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1 // was being parsed as potentially containing millions of exports. // Checking for duplicates addresses the issue. symbolCounts[symbolAddress]++ if symbolCounts[symbolAddress] > 10 { if !stringInSlice(ErrExportManyRepeatedEntries, pe.Anomalies) { pe.Anomalies = append(pe.Anomalies, ErrExportManyRepeatedEntries) } } if len(symbolCounts) > maxExportedSymbols { if !stringInSlice(ErrExportMaxOrdEntries, pe.Anomalies) { pe.Anomalies = append(pe.Anomalies, ErrExportMaxOrdEntries) } } newExport := ExportFunction{ Ordinal: exportDir.Base + i, FunctionRVA: symbolAddress, Forwarder: forwarderStr, ForwarderRVA: forwarderOffset, } exp.Functions = append(exp.Functions, newExport) } pe.Export = exp pe.HasExport = true return nil } // GetExportFunctionByRVA return an export function given an RVA. func (pe *File) GetExportFunctionByRVA(rva uint32) ExportFunction { for _, exp := range pe.Export.Functions { if exp.FunctionRVA == rva { return exp } } return ExportFunction{} } ================================================ FILE: exports_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"testing"
)

// TestExport bundles the expected export-table properties for one test binary.
type TestExport struct {
	entryCount int // expected number of parsed export functions
	entryIndex int // index of the function asserted below
	// NOTE(review): name is never asserted by TestExportDirectory; the second
	// case's "KERNEL32.dll" for mfc140u.dll is therefore harmless but looks
	// like a copy/paste leftover.
	name      string
	imgExpDir ImageExportDirectory
	expFunc   ExportFunction
}

func TestExportDirectory(t *testing.T) {

	tests := []struct {
		in  string
		out TestExport
	}{
		{
			getAbsoluteFilePath("test/kernel32.dll"),
			TestExport{
				entryCount: 1633,
				entryIndex: 0,
				name:       "KERNEL32.dll",
				imgExpDir: ImageExportDirectory{
					TimeDateStamp:         0x38B369C4,
					Name:                  0x0009E1D2,
					Base:                  0x1,
					NumberOfFunctions:     0x661,
					NumberOfNames:         0x661,
					AddressOfFunctions:    0x0009A208,
					AddressOfNames:        0x0009BB8C,
					AddressOfNameOrdinals: 0x0009D510,
				},
				// First named export is a forwarder to NTDLL.
				expFunc: ExportFunction{
					Ordinal:      0x1,
					FunctionRVA:  0x0009E1F7,
					NameRVA:      0x0009E1DF,
					Name:         "AcquireSRWLockExclusive",
					Forwarder:    "NTDLL.RtlAcquireSRWLockExclusive",
					ForwarderRVA: 0x9CBF7,
				},
			},
		},
		{
			getAbsoluteFilePath("test/mfc140u.dll"),
			// Exports by ordinal only (NumberOfNames is zero).
			TestExport{
				entryCount: 14103,
				entryIndex: 0,
				name:       "KERNEL32.dll",
				imgExpDir: ImageExportDirectory{
					TimeDateStamp:      0x5b8f7bca,
					Name:               0x3e2e0c,
					Base:               0x100,
					NumberOfFunctions:  0x371d,
					AddressOfFunctions: 0x3d5198,
				},
				expFunc: ExportFunction{
					Ordinal:     0x100,
					FunctionRVA: 0x275fa0,
				},
			},
		},
		// {
		// 	// TODO: ThreadSanitizer failed to allocate 0x000048000000 (1207959552) in Github CI
		// 	getAbsoluteFilePath("test/0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1"),
		// 	TestExport{
		// 		entryCount: 7728638,
		// 		entryIndex: 0,
		// 		name:       "",
		// 		imgExpDir: ImageExportDirectory{
		// 			Characteristics:       0xac0000,
		// 			TimeDateStamp:         0xac0000,
		// 			MinorVersion:          0xac,
		// 			Name:                  0xac0000,
		// 			Base:                  0xac0000,
		// 			NumberOfFunctions:     0xac0000,
		// 			NumberOfNames:         0xac0000,
		// 			AddressOfFunctions:    0xac0000,
		// 			AddressOfNames:        0xac0000,
		// 			AddressOfNameOrdinals: 0xac0000,
		// 		},
		// 		expFunc: ExportFunction{
		// 			Ordinal:     0xac0000,
		// 			FunctionRVA: 0xac0000,
		// 			NameRVA:     0xac0000,
		// 		},
		// 	},
		// },
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			// Fast mode skips data directories; the export directory is then
			// parsed explicitly below.
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in,
					err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			// Locate the export data directory entry in the optional header.
			var va, size uint32
			if file.Is64 {
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryExport]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			} else {
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryExport]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}

			err = file.parseExportDirectory(va, size)
			if err != nil {
				t.Fatalf("parseExportDirectory(%s) failed, reason: %v", tt.in, err)
			}

			export := file.Export
			if len(export.Functions) != tt.out.entryCount {
				t.Fatalf("export functions count assertion failed, got %v, want %v",
					len(export.Functions), tt.out.entryCount)
			}

			imgExpDir := export.Struct
			if imgExpDir != tt.out.imgExpDir {
				t.Fatalf("image export directory assertion failed, got %v, want %v",
					imgExpDir, tt.out.imgExpDir)
			}

			if len(export.Functions) > 0 {
				expFunc := export.Functions[tt.out.entryIndex]
				if expFunc != tt.out.expFunc {
					t.Fatalf("export entry assertion failed, got %v, want %v",
						expFunc, tt.out.expFunc)
				}
			}
		})
	}
}

================================================
FILE: file.go
================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

import (
	"errors"
	"os"

	"github.com/edsrzf/mmap-go"
	"github.com/saferwall/pe/log"
)

// A File represents an open PE file.
type File struct {
	DOSHeader    ImageDOSHeader              `json:"dos_header,omitempty"`
	RichHeader   RichHeader                  `json:"rich_header,omitempty"`
	NtHeader     ImageNtHeader               `json:"nt_header,omitempty"`
	COFF         COFF                        `json:"coff,omitempty"`
	Sections     []Section                   `json:"sections,omitempty"`
	Imports      []Import                    `json:"imports,omitempty"`
	Export       Export                      `json:"export,omitempty"`
	Debugs       []DebugEntry                `json:"debugs,omitempty"`
	Relocations  []Relocation                `json:"relocations,omitempty"`
	Resources    ResourceDirectory           `json:"resources,omitempty"`
	TLS          TLSDirectory                `json:"tls,omitempty"`
	LoadConfig   LoadConfig                  `json:"load_config,omitempty"`
	Exceptions   []Exception                 `json:"exceptions,omitempty"`
	Certificates CertificateSection          `json:"certificates,omitempty"`
	DelayImports []DelayImport               `json:"delay_imports,omitempty"`
	BoundImports []BoundImportDescriptorData `json:"bound_imports,omitempty"`
	GlobalPtr    uint32                      `json:"global_ptr,omitempty"`
	CLR          CLRData                     `json:"clr,omitempty"`
	IAT          []IATEntry                  `json:"iat,omitempty"`
	// Anomalies collects human-readable oddities found while parsing
	// (e.g. AnoNullNumberOfFunctions, AnoInvalidGlobalPtrReg).
	Anomalies []string `json:"anomalies,omitempty"`
	// Header — NOTE(review): populated outside this view; presumably the raw
	// header bytes. Confirm against the header parsers.
	Header []byte
	// Raw file contents: mmap-backed when opened via NewFile, a plain byte
	// slice when created via NewBytes (mmap.MMap is a []byte).
	data mmap.MMap
	// Embedded parse-state flags such as Is64, HasExport, HasGlobalPtr
	// (declared elsewhere in the package).
	FileInfo
	// Total size in bytes of the underlying data.
	size uint32
	// OverlayOffset — NOTE(review): set outside this view; presumably the
	// file offset where the overlay (data past the image) starts.
	OverlayOffset int64
	// Underlying file handle when opened from disk; nil for NewBytes.
	f *os.File
	// Effective parsing options (never nil after New/NewFile/NewBytes).
	opts *Options
	// Logger used throughout parsing.
	logger *log.Helper
}

// Options that influence the PE parsing behaviour.
type Options struct {

	// Parse only the PE header and do not parse data directories, by default (false).
	Fast bool

	// Includes section entropy, by default (false).
	SectionEntropy bool

	// Maximum COFF symbols to parse, by default (MaxDefaultCOFFSymbolsCount).
	MaxCOFFSymbolsCount uint32

	// Maximum relocations to parse, by default (MaxDefaultRelocEntriesCount).
	MaxRelocEntriesCount uint32

	// Disable certificate validation, by default (false).
	DisableCertValidation bool

	// Disable signature validation, by default (false).
	DisableSignatureValidation bool

	// A custom logger.
	Logger log.Logger

	// OmitExportDirectory determines if export directory parsing is skipped, by default (false).
	OmitExportDirectory bool

	// OmitImportDirectory determines if import directory parsing is skipped, by default (false).
	OmitImportDirectory bool

	// OmitExceptionDirectory determines if exception directory parsing is skipped, by default (false).
	OmitExceptionDirectory bool

	// OmitResourceDirectory determines if resource directory parsing is skipped, by default (false).
	OmitResourceDirectory bool

	// OmitSecurityDirectory determines if security directory parsing is skipped, by default (false).
	OmitSecurityDirectory bool

	// OmitRelocDirectory determines if relocation directory parsing is skipped, by default (false).
	OmitRelocDirectory bool

	// OmitDebugDirectory determines if debug directory parsing is skipped, by default (false).
	OmitDebugDirectory bool

	// OmitArchitectureDirectory determines if architecture directory parsing is skipped, by default (false).
	OmitArchitectureDirectory bool

	// OmitGlobalPtrDirectory determines if global pointer directory parsing is skipped, by default (false).
	OmitGlobalPtrDirectory bool

	// OmitTLSDirectory determines if TLS directory parsing is skipped, by default (false).
	OmitTLSDirectory bool

	// OmitLoadConfigDirectory determines if load config directory parsing is skipped, by default (false).
	OmitLoadConfigDirectory bool

	// OmitBoundImportDirectory determines if bound import directory parsing is skipped, by default (false).
	OmitBoundImportDirectory bool

	// OmitIATDirectory determines if IAT directory parsing is skipped, by default (false).
	OmitIATDirectory bool

	// OmitDelayImportDirectory determines if delay import directory parsing is skipped, by default (false).
	OmitDelayImportDirectory bool

	// OmitCLRHeaderDirectory determines if CLR header directory parsing is skipped, by default (false).
	OmitCLRHeaderDirectory bool

	// OmitCLRMetadata determines if CLR metadata parsing is skipped, by default (false).
	OmitCLRMetadata bool
}

// New instantiates a file instance with options given a file name.
func New(name string, opts *Options) (*File, error) { f, err := os.Open(name) if err != nil { return nil, err } return NewFile(f, opts) } // NewFile instantiates a file instance with options given a file handle. func NewFile(f *os.File, opts *Options) (*File, error) { // Memory map the file instead of using read/write. data, err := mmap.Map(f, mmap.RDONLY, 0) if err != nil { f.Close() return nil, err } file := File{} if opts != nil { file.opts = opts } else { file.opts = &Options{} } if file.opts.MaxCOFFSymbolsCount == 0 { file.opts.MaxCOFFSymbolsCount = MaxDefaultCOFFSymbolsCount } if file.opts.MaxRelocEntriesCount == 0 { file.opts.MaxRelocEntriesCount = MaxDefaultRelocEntriesCount } var logger log.Logger if file.opts.Logger == nil { logger = log.NewStdLogger(os.Stdout) file.logger = log.NewHelper(log.NewFilter(logger, log.FilterLevel(log.LevelError))) } else { file.logger = log.NewHelper(file.opts.Logger) } file.data = data file.size = uint32(len(file.data)) file.f = f return &file, nil } // NewBytes instantiates a file instance with options given a memory buffer. func NewBytes(data []byte, opts *Options) (*File, error) { file := File{} if opts != nil { file.opts = opts } else { file.opts = &Options{} } if file.opts.MaxCOFFSymbolsCount == 0 { file.opts.MaxCOFFSymbolsCount = MaxDefaultCOFFSymbolsCount } if file.opts.MaxRelocEntriesCount == 0 { file.opts.MaxRelocEntriesCount = MaxDefaultRelocEntriesCount } var logger log.Logger if file.opts.Logger == nil { logger = log.NewStdLogger(os.Stdout) file.logger = log.NewHelper(log.NewFilter(logger, log.FilterLevel(log.LevelError))) } else { file.logger = log.NewHelper(opts.Logger) } file.data = data file.size = uint32(len(file.data)) return &file, nil } func (pe *File) Close() error { _ = pe.Unmap() if pe.f != nil { return pe.f.Close() } return nil } // Close memory mapped file func (pe *File) Unmap() error { if pe.data != nil { return pe.data.Unmap() } return nil } // Parse performs the file parsing for a PE binary. 
func (pe *File) Parse() error { // check for the smallest PE size. if len(pe.data) < TinyPESize { return ErrInvalidPESize } // Parse the DOS header. err := pe.ParseDOSHeader() if err != nil { return err } // Parse the Rich header. err = pe.ParseRichHeader() if err != nil { pe.logger.Errorf("rich header parsing failed: %v", err) } // Parse the NT header. err = pe.ParseNTHeader() if err != nil { return err } // Parse COFF symbol table. err = pe.ParseCOFFSymbolTable() if err != nil { pe.logger.Debugf("coff symbols parsing failed: %v", err) } // Parse the Section Header. err = pe.ParseSectionHeader() if err != nil { return err } // In fast mode, do not parse data directories. if pe.opts.Fast { return nil } // Parse the Data Directory entries. return pe.ParseDataDirectories() } // String stringify the data directory entry. func (entry ImageDirectoryEntry) String() string { dataDirMap := map[ImageDirectoryEntry]string{ ImageDirectoryEntryExport: "Export", ImageDirectoryEntryImport: "Import", ImageDirectoryEntryResource: "Resource", ImageDirectoryEntryException: "Exception", ImageDirectoryEntryCertificate: "Security", ImageDirectoryEntryBaseReloc: "Relocation", ImageDirectoryEntryDebug: "Debug", ImageDirectoryEntryArchitecture: "Architecture", ImageDirectoryEntryGlobalPtr: "GlobalPtr", ImageDirectoryEntryTLS: "TLS", ImageDirectoryEntryLoadConfig: "LoadConfig", ImageDirectoryEntryBoundImport: "BoundImport", ImageDirectoryEntryIAT: "IAT", ImageDirectoryEntryDelayImport: "DelayImport", ImageDirectoryEntryCLR: "CLR", ImageDirectoryEntryReserved: "Reserved", } return dataDirMap[entry] } // ParseDataDirectories parses the data directories. The DataDirectory is an // array of 16 structures. Each array entry has a predefined meaning for what // it refers to. 
func (pe *File) ParseDataDirectories() error {

	foundErr := false
	oh32 := ImageOptionalHeader32{}
	oh64 := ImageOptionalHeader64{}

	// Pick the optional header flavor matching the image bitness.
	switch pe.Is64 {
	case true:
		oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
	case false:
		oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
	}

	// Maps data directory index to function which parses that directory.
	// Directories disabled via Options.Omit* are simply left out of the map
	// and skipped below.
	funcMaps := make(map[ImageDirectoryEntry]func(uint32, uint32) error)
	if !pe.opts.OmitExportDirectory {
		funcMaps[ImageDirectoryEntryExport] = pe.parseExportDirectory
	}
	if !pe.opts.OmitImportDirectory {
		funcMaps[ImageDirectoryEntryImport] = pe.parseImportDirectory
	}
	if !pe.opts.OmitExceptionDirectory {
		funcMaps[ImageDirectoryEntryException] = pe.parseExceptionDirectory
	}
	if !pe.opts.OmitResourceDirectory {
		funcMaps[ImageDirectoryEntryResource] = pe.parseResourceDirectory
	}
	if !pe.opts.OmitSecurityDirectory {
		funcMaps[ImageDirectoryEntryCertificate] = pe.parseSecurityDirectory
	}
	if !pe.opts.OmitRelocDirectory {
		funcMaps[ImageDirectoryEntryBaseReloc] = pe.parseRelocDirectory
	}
	if !pe.opts.OmitDebugDirectory {
		funcMaps[ImageDirectoryEntryDebug] = pe.parseDebugDirectory
	}
	if !pe.opts.OmitArchitectureDirectory {
		funcMaps[ImageDirectoryEntryArchitecture] = pe.parseArchitectureDirectory
	}
	if !pe.opts.OmitGlobalPtrDirectory {
		funcMaps[ImageDirectoryEntryGlobalPtr] = pe.parseGlobalPtrDirectory
	}
	if !pe.opts.OmitTLSDirectory {
		funcMaps[ImageDirectoryEntryTLS] = pe.parseTLSDirectory
	}
	if !pe.opts.OmitLoadConfigDirectory {
		funcMaps[ImageDirectoryEntryLoadConfig] = pe.parseLoadConfigDirectory
	}
	if !pe.opts.OmitBoundImportDirectory {
		funcMaps[ImageDirectoryEntryBoundImport] = pe.parseBoundImportDirectory
	}
	if !pe.opts.OmitIATDirectory {
		funcMaps[ImageDirectoryEntryIAT] = pe.parseIATDirectory
	}
	if !pe.opts.OmitDelayImportDirectory {
		funcMaps[ImageDirectoryEntryDelayImport] = pe.parseDelayImportDirectory
	}
	if !pe.opts.OmitCLRHeaderDirectory {
		funcMaps[ImageDirectoryEntryCLR] = pe.parseCLRHeaderDirectory
	}

	// Iterate over data directories and call the appropriate function.
	for entryIndex := ImageDirectoryEntry(0); entryIndex < ImageNumberOfDirectoryEntries; entryIndex++ {

		var va, size uint32
		switch pe.Is64 {
		case true:
			dirEntry := oh64.DataDirectory[entryIndex]
			va = dirEntry.VirtualAddress
			size = dirEntry.Size
		case false:
			dirEntry := oh32.DataDirectory[entryIndex]
			va = dirEntry.VirtualAddress
			size = dirEntry.Size
		}

		if va != 0 {
			// Each directory is parsed inside an immediately-invoked closure
			// so a deferred recover() can contain a panic from one parser
			// without aborting the remaining directories.
			func() {
				// keep parsing data directories even though some entries fails.
				defer func() {
					if e := recover(); e != nil {
						pe.logger.Errorf("unhandled exception when parsing data directory %s, reason: %v",
							entryIndex.String(), e)
						foundErr = true
					}
				}()

				// the last entry in the data directories is reserved and must be zero.
				if entryIndex == ImageDirectoryEntryReserved {
					pe.Anomalies = append(pe.Anomalies, AnoReservedDataDirectoryEntry)
					return
				}

				parseDirectory, ok := funcMaps[entryIndex]
				if !ok {
					// Directory was omitted via Options; skip silently.
					return
				}

				err := parseDirectory(va, size)
				if err != nil {
					// Parser-level errors are only warnings; foundErr is set
					// solely by recovered panics above.
					pe.logger.Warnf("failed to parse data directory %s, reason: %v",
						entryIndex.String(), err)
				}
			}()
		}
	}

	if foundErr {
		return errors.New("Data directory parsing failed")
	}

	return nil
}

================================================
FILE: file_test.go
================================================
// Copyright 2021 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
package pe

import (
	"os"
	"testing"
)

var peTests = []struct {
	in  string
	out error
}{
	{getAbsoluteFilePath("test/putty.exe"), nil},
}

func TestParse(t *testing.T) {
	for _, tt := range peTests {
		t.Run(tt.in, func(t *testing.T) {
			file, err := New(tt.in, &Options{})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}

			got := file.Parse()
			if got != nil {
				t.Errorf("Parse(%s) got %v, want %v", tt.in, got, tt.out)
			}
		})
	}
}

func TestParseOmitDirectories(t *testing.T) {
	for _, tt := range peTests {
		t.Run(tt.in, func(t *testing.T) {
			file, err := New(tt.in, &Options{OmitSecurityDirectory: true})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}

			got := file.Parse()
			if got != nil {
				t.Errorf("Parse(%s) got %v, want %v", tt.in, got, tt.out)
			}

			// Should expect an empty certificate
			if file.Certificates.Raw != nil {
				t.Errorf("Parse(%s) expected empty certificate", tt.in)
			}
		})
	}
}

func TestNewBytes(t *testing.T) {
	for _, tt := range peTests {
		t.Run(tt.in, func(t *testing.T) {
			// BUG FIX: the read error was silently discarded and the
			// deprecated io/ioutil.ReadFile was used; switch to os.ReadFile
			// (Go 1.16+) and fail fast on error.
			data, err := os.ReadFile(tt.in)
			if err != nil {
				t.Fatalf("ReadFile(%s) failed, reason: %v", tt.in, err)
			}

			file, err := NewBytes(data, &Options{})
			if err != nil {
				t.Fatalf("NewBytes(%s) failed, reason: %v", tt.in, err)
			}

			got := file.Parse()
			if got != nil {
				t.Errorf("Parse(%s) got %v, want %v", tt.in, got, tt.out)
			}
		})
	}
}

func TestChecksum(t *testing.T) {
	tests := []struct {
		in  string
		out uint32
	}{
		// file is DWORD aligned.
		{getAbsoluteFilePath("test/putty.exe"), 0x00122C22},
		// file is not DWORD aligned and needs paddings.
		{getAbsoluteFilePath("test/010001e68577ef704792448ff474d22c6545167231982447c568e55041169ef0"), 0x0006D558},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			file, err := New(tt.in, &Options{})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}

			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			got := file.Checksum()
			if got != tt.out {
				t.Errorf("Checksum(%s) got %v, want %v", tt.in, got, tt.out)
			}
		})
	}
}

func TestCanParseWithHandleAndClose(t *testing.T) {
	for _, tt := range peTests {
		t.Run(tt.in, func(t *testing.T) {
			file, err := os.Open(tt.in)
			if err != nil {
				t.Fatalf("Open file(%s) failed", tt.in)
			}

			pefile, err := NewFile(file, &Options{})
			if err != nil {
				t.Fatalf("NewFile (%s) failed", tt.in)
			}

			err = pefile.Parse()
			if err != nil {
				t.Fatalf("Parse (%s) failed", tt.in)
			}

			err = pefile.Unmap()
			if err != nil {
				t.Fatalf("Unmap (%s) failed", tt.in)
			}

			// The raw handle must remain readable after Unmap.
			// FIX: renamed from `len`, which shadowed the builtin len.
			const hdrSize = 2
			header := [hdrSize]byte{}
			n, err := file.ReadAt(header[:], 0)
			if err != nil {
				t.Fatalf("Failed to read after unmap (%s)", tt.in)
			}
			if n != hdrSize {
				t.Fatalf("Failed to read data (%s)", tt.in)
			}

			err = file.Close()
			if err != nil {
				t.Fatalf("Failed to close file (%s)", tt.in)
			}
		})
	}
}

================================================
FILE: globalptr.go
================================================
// Copyright 2022 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

const (
	// AnoInvalidGlobalPtrReg is reported when the global pointer register
	// offset is outside the image.
	AnoInvalidGlobalPtrReg = "Global pointer register offset outside of PE image"
)

// RVA of the value to be stored in the global pointer register. The size must
// be set to 0. This data directory is set to all zeros if the target
// architecture (for example, I386 or AMD64) does not use the concept of a
// global pointer.
func (pe *File) parseGlobalPtrDirectory(rva, size uint32) error { var err error // RVA of the value to be stored in the global pointer register. offset := pe.GetOffsetFromRva(rva) if offset == ^uint32(0) { // Fake global pointer data directory // sample: 0101f36de484fbc7bfbe6cb942a1ecf6fac0c3acd9f65b88b19400582d7e7007 pe.Anomalies = append(pe.Anomalies, AnoInvalidGlobalPtrReg) return nil } pe.GlobalPtr, err = pe.ReadUint32(offset) if err != nil { return err } pe.HasGlobalPtr = true return nil } ================================================ FILE: go.mod ================================================ module github.com/saferwall/pe go 1.15 require ( github.com/ayoubfaouzi/pkcs7 v0.2.3 github.com/edsrzf/mmap-go v1.1.0 golang.org/x/text v0.22.0 ) ================================================ FILE: go.sum ================================================ github.com/ayoubfaouzi/pkcs7 v0.2.3 h1:XGCYHteXgclHnNlPdCF8aFyoUKwP9VhLQp+VX+hBZ3U= github.com/ayoubfaouzi/pkcs7 v0.2.3/go.mod h1:u1EPWZOeIdVRo6C0/FVjB91Nsletw+8vZeAaAmeyJvQ= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= golang.org/x/crypto v0.33.0/go.mod 
h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= ================================================ FILE: helper.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "bytes" "encoding/binary" "errors" "golang.org/x/text/encoding/unicode" "path" "path/filepath" "runtime" "strings" ) const ( // TinyPESize On Windows XP (x32) the smallest PE executable is 97 bytes. TinyPESize = 97 // FileAlignmentHardcodedValue represents the value which PointerToRawData // should be at least equal or bigger to, or it will be rounded to zero. 
// According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html // if PointerToRawData is less that 0x200 it's rounded to zero. FileAlignmentHardcodedValue = 0x200 ) // Errors var ( // ErrInvalidPESize is returned when the file size is less that the smallest // PE file size possible.ErrImageOS2SignatureFound ErrInvalidPESize = errors.New("not a PE file, smaller than tiny PE") // ErrDOSMagicNotFound is returned when file is potentially a ZM executable. ErrDOSMagicNotFound = errors.New("DOS Header magic not found") // ErrInvalidElfanewValue is returned when e_lfanew is larger than file size. ErrInvalidElfanewValue = errors.New("invalid e_lfanew value. Probably not a PE file") // ErrInvalidNtHeaderOffset is returned when the NT Header offset is beyond // the image file. ErrInvalidNtHeaderOffset = errors.New( "invalid NT Header Offset. NT Header Signature not found") // ErrImageOS2SignatureFound is returned when signature is for a NE file. ErrImageOS2SignatureFound = errors.New( "not a valid PE signature. Probably a NE file") // ErrImageOS2LESignatureFound is returned when signature is for a LE file. ErrImageOS2LESignatureFound = errors.New( "not a valid PE signature. Probably an LE file") // ErrImageVXDSignatureFound is returned when signature is for a LX file. ErrImageVXDSignatureFound = errors.New( "not a valid PE signature. Probably an LX file") // ErrImageTESignatureFound is returned when signature is for a TE file. ErrImageTESignatureFound = errors.New( "not a valid PE signature. Probably a TE file") // ErrImageNtSignatureNotFound is returned when PE magic signature is not found. ErrImageNtSignatureNotFound = errors.New( "not a valid PE signature. Magic not found") // ErrImageNtOptionalHeaderMagicNotFound is returned when optional header // magic is different from PE32/PE32+. ErrImageNtOptionalHeaderMagicNotFound = errors.New( "not a valid PE signature. 
Optional Header magic not found") // ErrImageBaseNotAligned is reported when the image base is not aligned to 64K. ErrImageBaseNotAligned = errors.New( "corrupt PE file. Image base not aligned to 64 K") // AnoImageBaseOverflow is reported when the image base + SizeOfImage is // larger than 80000000h/FFFF080000000000h in PE32/P32+. AnoImageBaseOverflow = "Image base beyond allowed address" // ErrInvalidSectionFileAlignment is reported when section alignment is less than a // PAGE_SIZE and section alignment != file alignment. ErrInvalidSectionFileAlignment = errors.New("corrupt PE file. Section " + "alignment is less than a PAGE_SIZE and section alignment != file alignment") // AnoInvalidSizeOfImage is reported when SizeOfImage is not multiple of // SectionAlignment. AnoInvalidSizeOfImage = "Invalid SizeOfImage value, should be multiple " + "of SectionAlignment" // ErrOutsideBoundary is reported when attempting to read an address beyond // file image limits. ErrOutsideBoundary = errors.New("reading data outside boundary") ) // Max returns the larger of x or y. func Max(x, y uint32) uint32 { if x < y { return y } return x } func min(a, b uint32) uint32 { if a < b { return a } return b } // Min returns the min number in a slice. func Min(values []uint32) uint32 { min := values[0] for _, v := range values { if v < min { min = v } } return min } // IsValidDosFilename returns true if the DLL name is likely to be valid. 
// Valid FAT32 8.3 short filename characters according to: // http://en.wikipedia.org/wiki/8.3_filename // The filename length is not checked because the DLLs filename // can be longer that the 8.3 func IsValidDosFilename(filename string) bool { alphabet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" numerals := "0123456789" special := "!#$%&'()-@^_`{}~+,.;=[]\\/" charset := alphabet + numerals + special for _, c := range filename { if !strings.Contains(charset, string(c)) { return false } } return true } // IsValidFunctionName checks if an imported name uses the valid accepted // characters expected in mangled function names. If the symbol's characters // don't fall within this charset we will assume the name is invalid. func IsValidFunctionName(functionName string) bool { alphabet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" numerals := "0123456789" special := "_?@$()<>" charset := alphabet + numerals + special for _, c := range functionName { if !strings.Contains(charset, string(c)) { return false } } return true } // IsPrintable checks weather a string is printable. func IsPrintable(s string) bool { alphabet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" numerals := "0123456789" whitespace := " \t\n\r\v\f" special := "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" charset := alphabet + numerals + special + whitespace for _, c := range s { if !strings.Contains(charset, string(c)) { return false } } return true } // getSectionByRva returns the section containing the given address. func (pe *File) getSectionByRva(rva uint32) *Section { for _, section := range pe.Sections { if section.Contains(rva, pe) { return §ion } } return nil } // getSectionByRva returns the section name containing the given address. 
func (pe *File) getSectionNameByRva(rva uint32) string { for _, section := range pe.Sections { if section.Contains(rva, pe) { return section.String() } } return "" } func (pe *File) getSectionByOffset(offset uint32) *Section { for _, section := range pe.Sections { if section.Header.PointerToRawData == 0 { continue } adjustedPointer := pe.adjustFileAlignment( section.Header.PointerToRawData) if adjustedPointer <= offset && offset < (adjustedPointer+section.Header.SizeOfRawData) { return §ion } } return nil } // GetOffsetFromRva returns the file offset corresponding to this RVA. func (pe *File) GetOffsetFromRva(rva uint32) uint32 { // Given a RVA, this method will find the section where the // data lies and return the offset within the file. section := pe.getSectionByRva(rva) if section == nil { if rva < uint32(len(pe.data)) { return rva } return ^uint32(0) } sectionAlignment := pe.adjustSectionAlignment(section.Header.VirtualAddress) fileAlignment := pe.adjustFileAlignment(section.Header.PointerToRawData) return rva - sectionAlignment + fileAlignment } // GetRVAFromOffset returns an RVA given an offset. func (pe *File) GetRVAFromOffset(offset uint32) uint32 { section := pe.getSectionByOffset(offset) minAddr := ^uint32(0) if section == nil { if len(pe.Sections) == 0 { return offset } for _, section := range pe.Sections { vaddr := pe.adjustSectionAlignment(section.Header.VirtualAddress) if vaddr < minAddr { minAddr = vaddr } } // Assume that offset lies within the headers // The case illustrating this behavior can be found at: // http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html // where the import table is not contained by any section // hence the RVA needs to be resolved to a raw offset if offset < minAddr { return offset } pe.logger.Warn("data at Offset can't be fetched. 
Corrupt header?") return ^uint32(0) } sectionAlignment := pe.adjustSectionAlignment(section.Header.VirtualAddress) fileAlignment := pe.adjustFileAlignment(section.Header.PointerToRawData) return offset - fileAlignment + sectionAlignment } func (pe *File) getSectionByName(secName string) (section *ImageSectionHeader) { for _, section := range pe.Sections { if section.String() == secName { return §ion.Header } } return nil } // getStringAtRVA returns an ASCII string located at the given address. func (pe *File) getStringAtRVA(rva, maxLen uint32) string { if rva == 0 { return "" } section := pe.getSectionByRva(rva) if section == nil { if rva > pe.size { return "" } end := rva + maxLen if end > pe.size { end = pe.size } s := pe.GetStringFromData(0, pe.data[rva:end]) return string(s) } s := pe.GetStringFromData(0, section.Data(rva, maxLen, pe)) return string(s) } func (pe *File) readUnicodeStringAtRVA(rva uint32, maxLength uint32) string { str := "" offset := pe.GetOffsetFromRva(rva) i := uint32(0) for i = 0; i < maxLength; i += 2 { if offset+i >= pe.size || pe.data[offset+i] == 0 { break } str += string(pe.data[offset+i]) } return str } func (pe *File) readASCIIStringAtOffset(offset, maxLength uint32) (uint32, string) { str := "" i := uint32(0) for i = 0; i < maxLength; i++ { if offset+i >= pe.size || pe.data[offset+i] == 0 { break } str += string(pe.data[offset+i]) } return i, str } // GetStringFromData returns ASCII string from within the data. func (pe *File) GetStringFromData(offset uint32, data []byte) []byte { dataSize := uint32(len(data)) if dataSize == 0 { return nil } if offset > dataSize { return nil } end := offset for end < dataSize { if data[end] == 0 { break } end++ } return data[offset:end] } // getStringAtOffset returns a string given an offset. 
func (pe *File) getStringAtOffset(offset, size uint32) (string, error) { if offset+size > pe.size { return "", ErrOutsideBoundary } str := string(pe.data[offset : offset+size]) return strings.Replace(str, "\x00", "", -1), nil } // GetData returns the data given an RVA regardless of the section where it // lies on. func (pe *File) GetData(rva, length uint32) ([]byte, error) { // Given a RVA and the size of the chunk to retrieve, this method // will find the section where the data lies and return the data. section := pe.getSectionByRva(rva) var end uint32 if length > 0 { end = rva + length } else { end = 0 } if section == nil { if rva < uint32(len(pe.Header)) { return pe.Header[rva:end], nil } // Before we give up we check whether the file might contain the data // anyway. There are cases of PE files without sections that rely on // windows loading the first 8291 bytes into memory and assume the data // will be there. A functional file with these characteristics is: // MD5: 0008892cdfbc3bda5ce047c565e52295 // SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9 if rva < uint32(len(pe.data)) { return pe.data[rva:end], nil } return nil, errors.New("data at RVA can't be fetched. Corrupt header?") } return section.Data(rva, length, pe), nil } // The alignment factor (in bytes) that is used to align the raw data of sections // in the image file. The value should be a power of 2 between 512 and 64 K, // inclusive. The default is 512. If the SectionAlignment is less than the // architecture's page size, then FileAlignment must match SectionAlignment. 
func (pe *File) adjustFileAlignment(va uint32) uint32 { var fileAlignment uint32 switch pe.Is64 { case true: fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).FileAlignment case false: fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).FileAlignment } if fileAlignment > FileAlignmentHardcodedValue && fileAlignment%2 != 0 { pe.Anomalies = append(pe.Anomalies, ErrInvalidFileAlignment) } if fileAlignment < FileAlignmentHardcodedValue { return va } // round it to 0x200 if not power of 2. // According to https://github.com/corkami/docs/blob/master/PE/PE.md // if PointerToRawData is less that 0x200 it's rounded to zero. Loading the // test file in a debugger it's easy to verify that the PointerToRawData // value of 1 is rounded to zero. Hence we reproduce the behavior return (va / 0x200) * 0x200 } // The alignment (in bytes) of sections when they are loaded into memory // It must be greater than or equal to FileAlignment. The default is the // page size for the architecture. 
func (pe *File) adjustSectionAlignment(va uint32) uint32 { var fileAlignment, sectionAlignment uint32 switch pe.Is64 { case true: fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).FileAlignment sectionAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).SectionAlignment case false: fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).FileAlignment sectionAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).SectionAlignment } if fileAlignment < FileAlignmentHardcodedValue && fileAlignment != sectionAlignment { pe.Anomalies = append(pe.Anomalies, ErrInvalidSectionAlignment) } if sectionAlignment < 0x1000 { // page size sectionAlignment = fileAlignment } // 0x200 is the minimum valid FileAlignment according to the documentation // although ntoskrnl.exe has an alignment of 0x80 in some Windows versions if sectionAlignment != 0 && va%sectionAlignment != 0 { return sectionAlignment * (va / sectionAlignment) } return va } // alignDword aligns the offset on a 32-bit boundary. func alignDword(offset, base uint32) uint32 { return ((offset + base + 3) & 0xfffffffc) - (base & 0xfffffffc) } // stringInSlice checks weather a string exists in a slice of strings. func stringInSlice(a string, list []string) bool { for _, b := range list { if b == a { return true } } return false } // intInSlice checks weather a uint32 exists in a slice of uint32. func intInSlice(a uint32, list []uint32) bool { for _, b := range list { if b == a { return true } } return false } // IsDriver returns true if the PE file is a Windows driver. func (pe *File) IsDriver() bool { // Checking that the ImageBase field of the OptionalHeader is above or // equal to 0x80000000 (that is, whether it lies in the upper 2GB of //the address space, normally belonging to the kernel) is not a // reliable enough indicator. For instance, PEs that play the invalid // ImageBase trick to get relocated could be incorrectly assumed to be // drivers. 
// Checking if any section characteristics have the IMAGE_SCN_MEM_NOT_PAGED // flag set is not reliable either. // If there's still no import directory (the PE doesn't have one or it's // malformed), give up. if len(pe.Imports) == 0 { return false } // DIRECTORY_ENTRY_IMPORT will now exist, although it may be empty. // If it imports from "ntoskrnl.exe" or other kernel components it should // be a driver. systemDLLs := []string{"ntoskrnl.exe", "hal.dll", "ndis.sys", "bootvid.dll", "kdcom.dll"} for _, dll := range pe.Imports { if stringInSlice(strings.ToLower(dll.Name), systemDLLs) { return true } } // If still we couldn't tell, check common driver section with combination // of IMAGE_SUBSYSTEM_NATIVE or IMAGE_SUBSYSTEM_NATIVE_WINDOWS. subsystem := ImageOptionalHeaderSubsystemType(0) oh32 := ImageOptionalHeader32{} oh64 := ImageOptionalHeader64{} switch pe.Is64 { case true: oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64) subsystem = oh64.Subsystem case false: oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32) subsystem = oh32.Subsystem } commonDriverSectionNames := []string{"page", "paged", "nonpage", "init"} for _, section := range pe.Sections { s := strings.ToLower(section.String()) if stringInSlice(s, commonDriverSectionNames) && (subsystem&ImageSubsystemNativeWindows != 0 || subsystem&ImageSubsystemNative != 0) { return true } } return false } // IsDLL returns true if the PE file is a standard DLL. func (pe *File) IsDLL() bool { return pe.NtHeader.FileHeader.Characteristics&ImageFileDLL != 0 } // IsEXE returns true if the PE file is a standard executable. func (pe *File) IsEXE() bool { // Returns true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set // and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either. 
if pe.IsDLL() || pe.IsDriver() { return false } if pe.NtHeader.FileHeader.Characteristics&ImageFileExecutableImage == 0 { return false } return true } // Checksum calculates the PE checksum as generated by CheckSumMappedFile(). func (pe *File) Checksum() uint32 { var checksum uint64 = 0 var max uint64 = 0x100000000 currentDword := uint32(0) // Get the Checksum offset. optionalHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + 4 + uint32(binary.Size(pe.NtHeader.FileHeader)) // `CheckSum` field position in optional PE headers is always 64 for PE and PE+. checksumOffset := optionalHeaderOffset + 64 // Verify the data is DWORD-aligned and add padding if needed remainder := pe.size % 4 dataLen := pe.size if remainder > 0 { dataLen = pe.size + (4 - remainder) paddedBytes := make([]byte, 4-remainder) pe.data = append(pe.data, paddedBytes...) } for i := uint32(0); i < dataLen; i += 4 { // Skip the checksum field. if i == checksumOffset { continue } // Read DWORD from file. currentDword = binary.LittleEndian.Uint32(pe.data[i:]) // Calculate checksum. checksum = (checksum & 0xffffffff) + uint64(currentDword) + (checksum >> 32) if checksum > max { checksum = (checksum & 0xffffffff) + (checksum >> 32) } } checksum = (checksum & 0xffff) + (checksum >> 16) checksum = checksum + (checksum >> 16) checksum = checksum & 0xffff // The length is the one of the original data, not the padded one checksum += uint64(pe.size) return uint32(checksum) } // ReadUint64 read a uint64 from a buffer. func (pe *File) ReadUint64(offset uint32) (uint64, error) { if offset+8 > pe.size { return 0, ErrOutsideBoundary } return binary.LittleEndian.Uint64(pe.data[offset:]), nil } // ReadUint32 read a uint32 from a buffer. func (pe *File) ReadUint32(offset uint32) (uint32, error) { if offset > pe.size-4 { return 0, ErrOutsideBoundary } return binary.LittleEndian.Uint32(pe.data[offset:]), nil } // ReadUint16 read a uint16 from a buffer. 
func (pe *File) ReadUint16(offset uint32) (uint16, error) { if offset > pe.size-2 { return 0, ErrOutsideBoundary } return binary.LittleEndian.Uint16(pe.data[offset:]), nil } // ReadUint8 read a uint8 from a buffer. func (pe *File) ReadUint8(offset uint32) (uint8, error) { if offset+1 > pe.size { return 0, ErrOutsideBoundary } b := pe.data[offset : offset+1][0] return uint8(b), nil } func (pe *File) structUnpack(iface interface{}, offset, size uint32) (err error) { // Boundary check totalSize := offset + size // Integer overflow if (totalSize > offset) != (size > 0) { return ErrOutsideBoundary } if offset >= pe.size || totalSize > pe.size { return ErrOutsideBoundary } buf := bytes.NewReader(pe.data[offset : offset+size]) err = binary.Read(buf, binary.LittleEndian, iface) if err != nil { return err } return nil } // ReadBytesAtOffset returns a byte array from offset. func (pe *File) ReadBytesAtOffset(offset, size uint32) ([]byte, error) { // Boundary check totalSize := offset + size // Integer overflow if (totalSize > offset) != (size > 0) { return nil, ErrOutsideBoundary } if offset >= pe.size || totalSize > pe.size { return nil, ErrOutsideBoundary } return pe.data[offset : offset+size], nil } // DecodeUTF16String decodes the UTF16 string from the byte slice. func DecodeUTF16String(b []byte) (string, error) { n := bytes.Index(b, []byte{0, 0}) if n == 0 { return "", nil } decoder := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewDecoder() s, err := decoder.Bytes(b[0 : n+1]) if err != nil { return "", err } return string(s), nil } // IsBitSet returns true when a bit on a particular position is set. func IsBitSet(n uint64, pos int) bool { val := n & (1 << pos) return (val > 0) } func getAbsoluteFilePath(testfile string) string { _, p, _, _ := runtime.Caller(0) return path.Join(filepath.Dir(p), testfile) } ================================================ FILE: helper_test.go ================================================ // Copyright 2021 Saferwall. 
All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "testing" ) func TestIsEXE(t *testing.T) { tests := []struct { in string out bool }{ {getAbsoluteFilePath("test/liblzo2-2.dll"), false}, {getAbsoluteFilePath("test/putty.exe"), true}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } got := file.IsEXE() if got != tt.out { t.Errorf("IsEXE(%s) got %v, want %v", tt.in, got, tt.out) } }) } } func TestIsDLL(t *testing.T) { tests := []struct { in string out bool }{ {getAbsoluteFilePath("test/liblzo2-2.dll"), true}, {getAbsoluteFilePath("test/putty.exe"), false}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } got := file.IsDLL() if got != tt.out { t.Errorf("IsDLL(%s) got %v, want %v", tt.in, got, tt.out) } }) } } func TestIsDriver(t *testing.T) { tests := []struct { in string out bool }{ {getAbsoluteFilePath("test/liblzo2-2.dll"), false}, {getAbsoluteFilePath("test/WdBoot.sys"), true}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } got := file.IsDriver() if got != tt.out { t.Errorf("IsDriver(%s) got %v, want %v", tt.in, got, tt.out) } }) } } ================================================ FILE: iat.go ================================================ // Copyright 2022 Saferwall. All rights reserved. 
// Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe // IATEntry represents an entry inside the IAT. type IATEntry struct { Index uint32 `json:"index"` Rva uint32 `json:"rva"` Value interface{} `json:"value,omitempty"` Meaning string `json:"meaning"` } // The structure and content of the import address table are identical to those // of the import lookup table, until the file is bound. During binding, the // entries in the import address table are overwritten with the 32-bit (for // PE32) or 64-bit (for PE32+) addresses of the symbols that are being imported. // These addresses are the actual memory addresses of the symbols, although // technically they are still called “virtual addresses.” The loader typically // processes the binding. // // The Import Address Table is there to to only trigger Copy On Write for as // few pages as possible (those being the actual Import Address Table pages // themselves). // This is, partially the reason there's that extra level of indirection in the // PE to begin with. func (pe *File) parseIATDirectory(rva, size uint32) error { var entries []IATEntry var index uint32 var err error startRva := rva for startRva+size > rva { ie := IATEntry{} offset := pe.GetOffsetFromRva(rva) if pe.Is64 { ie.Value, err = pe.ReadUint64(offset) if err != nil { break } ie.Rva = rva rva += 8 } else { ie.Value, err = pe.ReadUint32(offset) if err != nil { break } ie.Rva = rva rva += 4 } ie.Index = index imp, i := pe.GetImportEntryInfoByRVA(rva) if len(imp.Functions) != 0 { ie.Meaning = imp.Name + "!" + imp.Functions[i].Name } entries = append(entries, ie) index++ } pe.IAT = entries pe.HasIAT = true return nil } ================================================ FILE: imports.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe import ( "crypto/md5" "encoding/binary" "encoding/hex" "errors" "fmt" "strconv" "strings" ) const ( imageOrdinalFlag32 = uint32(0x80000000) imageOrdinalFlag64 = uint64(0x8000000000000000) maxRepeatedAddresses = uint32(0xF) maxAddressSpread = uint32(0x8000000) addressMask32 = uint32(0x7fffffff) addressMask64 = uint64(0x7fffffffffffffff) maxDllLength = 0x200 maxImportNameLength = 0x200 ) var ( // AnoInvalidThunkAddressOfData is reported when thunk address is too spread out. AnoInvalidThunkAddressOfData = "Thunk Address Of Data too spread out" // AnoManyRepeatedEntries is reported when import directory contains many // entries have the same RVA. AnoManyRepeatedEntries = "Import directory contains many repeated entries" // AnoAddressOfDataBeyondLimits is reported when Thunk AddressOfData goes // beyond limits. AnoAddressOfDataBeyondLimits = "Thunk AddressOfData beyond limits" // AnoImportNoNameNoOrdinal is reported when an import entry does not have // a name neither an ordinal, most probably malformed data. AnoImportNoNameNoOrdinal = "Must have either an ordinal or a name in an import" // ErrDamagedImportTable is reported when the IAT and ILT table length is 0. ErrDamagedImportTable = errors.New( "damaged Import Table information. ILT and/or IAT appear to be broken") ) // ImageImportDescriptor describes the remainder of the import information. // The import directory table contains address information that is used to // resolve fixup references to the entry points within a DLL image. // It consists of an array of import directory entries, one entry for each DLL // to which the image refers. The last directory entry is empty (filled with // null values), which indicates the end of the directory table. type ImageImportDescriptor struct { // The RVA of the import lookup/name table (INT). This table contains a name // or ordinal for each import. The INT is an array of IMAGE_THUNK_DATA structs. 
OriginalFirstThunk uint32 `json:"original_first_thunk"` // The stamp that is set to zero until the image is bound. After the image // is bound, this field is set to the time/data stamp of the DLL. TimeDateStamp uint32 `json:"time_date_stamp"` // The index of the first forwarder reference (-1 if no forwarders). ForwarderChain uint32 `json:"forwarder_chain"` // The address of an ASCII string that contains the name of the DLL. // This address is relative to the image base. Name uint32 `json:"name"` // The RVA of the import address table (IAT). The contents of this table are // identical to the contents of the import lookup table until the image is bound. FirstThunk uint32 `json:"first_thunk"` } // ImageThunkData32 corresponds to one imported function from the executable. // The entries are an array of 32-bit numbers for PE32 or an array of 64-bit // numbers for PE32+. The ends of both arrays are indicated by an // IMAGE_THUNK_DATA element with a value of zero. // The IMAGE_THUNK_DATA union is a DWORD with these interpretations: // DWORD Function; // Memory address of the imported function // DWORD Ordinal; // Ordinal value of imported API // DWORD AddressOfData; // RVA to an IMAGE_IMPORT_BY_NAME with the imported API name // DWORD ForwarderString;// RVA to a forwarder string type ImageThunkData32 struct { AddressOfData uint32 } // ImageThunkData64 is the PE32+ version of IMAGE_THUNK_DATA. type ImageThunkData64 struct { AddressOfData uint64 } type ThunkData32 struct { ImageThunkData ImageThunkData32 Offset uint32 } type ThunkData64 struct { ImageThunkData ImageThunkData64 Offset uint32 } // ImportFunction represents an imported function in the import table. type ImportFunction struct { // An ASCII string that contains the name to import. This is the string that // must be matched to the public name in the DLL. This string is case // sensitive and terminated by a null byte. Name string `json:"name"` // An index into the export name pointer table. 
A match is attempted first // with this value. If it fails, a binary search is performed on the DLL's // export name pointer table. Hint uint16 `json:"hint"` // If this is true, import by ordinal. Otherwise, import by name. ByOrdinal bool `json:"by_ordinal"` // A 16-bit ordinal number. This field is used only if the Ordinal/Name Flag // bit field is 1 (import by ordinal). Bits 30-15 or 62-15 must be 0. Ordinal uint32 `json:"ordinal"` // Name Thunk Value (OFT) OriginalThunkValue uint64 `json:"original_thunk_value"` // Address Thunk Value (FT) ThunkValue uint64 `json:"thunk_value"` // Address Thunk RVA. ThunkRVA uint32 `json:"thunk_rva"` // Name Thunk RVA. OriginalThunkRVA uint32 `json:"original_thunk_rva"` } // Import represents an empty entry in the import table. type Import struct { Offset uint32 `json:"offset"` Name string `json:"name"` Functions []ImportFunction `json:"functions"` Descriptor ImageImportDescriptor `json:"descriptor"` } func (pe *File) parseImportDirectory(rva, size uint32) (err error) { for { importDesc := ImageImportDescriptor{} fileOffset := pe.GetOffsetFromRva(rva) importDescSize := uint32(binary.Size(importDesc)) err := pe.structUnpack(&importDesc, fileOffset, importDescSize) // If the RVA is invalid all would blow up. Some EXEs seem to be // specially nasty and have an invalid RVA. if err != nil { return err } // If the structure is all zeros, we reached the end of the list. if importDesc == (ImageImportDescriptor{}) { break } rva += importDescSize // If the array of thunks is somewhere earlier than the import // descriptor we can set a maximum length for the array. 
Otherwise // just set a maximum length of the size of the file
		maxLen := uint32(len(pe.data)) - fileOffset
		if rva > importDesc.OriginalFirstThunk || rva > importDesc.FirstThunk {
			if rva < importDesc.OriginalFirstThunk {
				maxLen = rva - importDesc.FirstThunk
			} else if rva < importDesc.FirstThunk {
				maxLen = rva - importDesc.OriginalFirstThunk
			} else {
				maxLen = Max(rva-importDesc.OriginalFirstThunk,
					rva-importDesc.FirstThunk)
			}
		}

		var importedFunctions []ImportFunction
		if pe.Is64 {
			importedFunctions, err = pe.parseImports64(&importDesc, maxLen)
		} else {
			importedFunctions, err = pe.parseImports32(&importDesc, maxLen)
		}
		if err != nil {
			return err
		}

		dllName := pe.getStringAtRVA(importDesc.Name, maxDllLength)
		if !IsValidDosFilename(dllName) {
			dllName = "*invalid*"
			continue
		}

		pe.Imports = append(pe.Imports, Import{
			Offset:     fileOffset,
			Name:       string(dllName),
			Functions:  importedFunctions,
			Descriptor: importDesc,
		})
	}

	if len(pe.Imports) > 0 {
		pe.HasImport = true
	}

	return nil
}

// getImportTable32 walks a 32-bit thunk array (import lookup table or import
// address table) starting at rva and returns one ThunkData32 per entry. The
// walk stops at the first all-zero thunk (the terminator), when the table
// would run past maxLen bytes, or when an entry's AddressOfData overlaps the
// table being scanned (very unlikely to be legitimate data).
//
// In the original Visual C++ 6.0 incarnation of ImgDelayDescr, all fields
// containing addresses used virtual addresses rather than RVAs; when
// isOldDelayImport is set the image base is therefore subtracted before the
// RVA-to-offset translation.
//
// Bogus-table heuristics record anomalies instead of failing hard: too many
// repeated AddressOfData values, or an address spread wider than
// maxAddressSpread, both flag the table as suspicious.
func (pe *File) getImportTable32(rva uint32, maxLen uint32,
	isOldDelayImport bool) ([]ThunkData32, error) {

	// Setup variables. (A map previously mirrored retVal keyed by RVA but was
	// never read; it has been removed as dead bookkeeping.)
	retVal := []ThunkData32{}
	minAddressOfData := ^uint32(0)
	maxAddressOfData := uint32(0)
	repeatedAddress := uint32(0)
	var size uint32 = 4
	addressesOfData := make(map[uint32]bool)

	startRVA := rva

	if rva == 0 {
		return nil, nil
	}

	for {
		if rva >= startRVA+maxLen {
			pe.logger.Warnf("Error parsing the import table. Entries go beyond bounds.")
			break
		}

		// If we see too many times the same entry we assume it could be
		// a table containing bogus data (with malicious intent or otherwise).
		if repeatedAddress >= maxRepeatedAddresses {
			if !stringInSlice(AnoManyRepeatedEntries, pe.Anomalies) {
				pe.Anomalies = append(pe.Anomalies, AnoManyRepeatedEntries)
			}
		}

		// If the addresses point somewhere but the difference between the
		// highest and lowest address is larger than maxAddressSpread we assume
		// a bogus table as the addresses should be contained within a module.
		if maxAddressOfData-minAddressOfData > maxAddressSpread {
			if !stringInSlice(AnoInvalidThunkAddressOfData, pe.Anomalies) {
				pe.Anomalies = append(pe.Anomalies, AnoInvalidThunkAddressOfData)
			}
		}

		// In its original incarnation in Visual C++ 6.0, all ImgDelayDescr
		// fields containing addresses used virtual addresses, rather than RVAs.
		// That is, they contained actual addresses where the delayload data
		// could be found. These fields are DWORDs, the size of a pointer on the x86.
		// Now fast-forward to IA-64 support. All of a sudden, 4 bytes isn't
		// enough to hold a complete address. At this point, Microsoft did the
		// correct thing and changed the fields containing addresses to RVAs.
		offset := uint32(0)
		if isOldDelayImport {
			oh32 := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
			newRVA := rva - oh32.ImageBase
			offset = pe.GetOffsetFromRva(newRVA)
			if offset == ^uint32(0) {
				return nil, nil
			}
		} else {
			offset = pe.GetOffsetFromRva(rva)
			if offset == ^uint32(0) {
				return nil, nil
			}
		}

		// Read the image thunk data.
		thunk := ImageThunkData32{}
		err := pe.structUnpack(&thunk, offset, size)
		if err != nil {
			// pe.logger.Warnf("Error parsing the import table. " +
			// "Invalid data at RVA: 0x%x", rva)
			return nil, nil
		}

		// An all-zero thunk terminates the table.
		if thunk == (ImageThunkData32{}) {
			break
		}

		// Check if the AddressOfData lies within the range of RVAs that it's
		// being scanned, abort if that is the case, as it is very unlikely
		// to be legitimate data.
		// Seen in PE with SHA256:
		// 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c
		if thunk.AddressOfData >= startRVA && thunk.AddressOfData <= rva {
			pe.logger.Warnf("Error parsing the import table. "+
				"AddressOfData overlaps with THUNK_DATA for THUNK at: "+
				"RVA 0x%x", rva)
			break
		}

		if thunk.AddressOfData&imageOrdinalFlag32 > 0 {
			// If the entry looks like could be an ordinal.
			if thunk.AddressOfData&0x7fffffff > 0xffff {
				// but its value is beyond 2^16, we will assume it's a
				// corrupted and ignore it altogether
				if !stringInSlice(AnoAddressOfDataBeyondLimits, pe.Anomalies) {
					pe.Anomalies = append(pe.Anomalies,
						AnoAddressOfDataBeyondLimits)
				}
			}
		} else {
			// and if it looks like it should be an RVA keep track of the RVAs seen
			// and store them to study their properties. When certain non-standard
			// features are detected the parsing will be aborted
			_, ok := addressesOfData[thunk.AddressOfData]
			if ok {
				repeatedAddress++
			} else {
				addressesOfData[thunk.AddressOfData] = true
			}

			if thunk.AddressOfData > maxAddressOfData {
				maxAddressOfData = thunk.AddressOfData
			}

			if thunk.AddressOfData < minAddressOfData {
				minAddressOfData = thunk.AddressOfData
			}
		}

		thunkData := ThunkData32{ImageThunkData: thunk, Offset: rva}
		retVal = append(retVal, thunkData)
		rva += size
	}
	return retVal, nil
}

func (pe *File) getImportTable64(rva uint32, maxLen uint32,
	isOldDelayImport bool) ([]ThunkData64, error) {

	// Setup variables
	thunkTable := make(map[uint32]*ImageThunkData64)
	retVal := []ThunkData64{}
	minAddressOfData := ^uint64(0)
	maxAddressOfData := uint64(0)
	repeatedAddress := uint64(0)
	var size uint32 = 8
	addressesOfData := make(map[uint64]bool)

	startRVA := rva

	if rva == 0 {
		return nil, nil
	}

	for {
		if rva >= startRVA+maxLen {
			pe.logger.Warnf("Error parsing the import table. 
Entries go beyond bounds.")
			break
		}

		// if we see too many times the same entry we assume it could be
		// a table containing bogus data (with malicious intent or otherwise)
		if repeatedAddress >= uint64(maxRepeatedAddresses) {
			if !stringInSlice(AnoManyRepeatedEntries, pe.Anomalies) {
				pe.Anomalies = append(pe.Anomalies, AnoManyRepeatedEntries)
			}
		}

		// if the addresses point somewhere but the difference between the highest
		// and lowest address is larger than maxAddressSpread we assume a bogus
		// table as the addresses should be contained within a module
		if maxAddressOfData-minAddressOfData > uint64(maxAddressSpread) {
			if !stringInSlice(AnoInvalidThunkAddressOfData, pe.Anomalies) {
				pe.Anomalies = append(pe.Anomalies, AnoInvalidThunkAddressOfData)
			}
		}

		// In its original incarnation in Visual C++ 6.0, all ImgDelayDescr
		// fields containing addresses used virtual addresses, rather than RVAs.
		// That is, they contained actual addresses where the delayload data
		// could be found. These fields are DWORDs, the size of a pointer on the x86.
		// Now fast-forward to IA-64 support. All of a sudden, 4 bytes isn't
		// enough to hold a complete address. At this point, Microsoft did the
		// correct thing and changed the fields containing addresses to RVAs.
		offset := uint32(0)
		if isOldDelayImport {
			oh64 := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
			// NOTE(review): the image base is truncated to 32 bits before the
			// subtraction; for old-style (VC6) delay descriptors the stored
			// fields are DWORD virtual addresses, so presumably this is
			// intentional — confirm against images based above 4 GiB.
			newRVA := rva - uint32(oh64.ImageBase)
			offset = pe.GetOffsetFromRva(newRVA)
			if offset == ^uint32(0) {
				return nil, nil
			}
		} else {
			offset = pe.GetOffsetFromRva(rva)
			if offset == ^uint32(0) {
				return nil, nil
			}
		}

		// Read the image thunk data.
		thunk := ImageThunkData64{}
		err := pe.structUnpack(&thunk, offset, size)
		if err != nil {
			// pe.logger.Warnf("Error parsing the import table. " +
			// "Invalid data at RVA: 0x%x", rva)
			return nil, nil
		}

		// An all-zero thunk terminates the table.
		if thunk == (ImageThunkData64{}) {
			break
		}

		// Check if the AddressOfData lies within the range of RVAs that it's
		// being scanned, abort if that is the case, as it is very unlikely
		// to be legitimate data.
		// Seen in PE with SHA256:
		// 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c
		if thunk.AddressOfData >= uint64(startRVA) &&
			thunk.AddressOfData <= uint64(rva) {
			pe.logger.Warnf("Error parsing the import table. "+
				"AddressOfData overlaps with THUNK_DATA for THUNK at: "+
				"RVA 0x%x", rva)
			break
		}

		// If the entry looks like could be an ordinal
		if thunk.AddressOfData&imageOrdinalFlag64 > 0 {
			// but its value is beyond 2^16, we will assume it's a
			// corrupted and ignore it altogether
			// NOTE(review): the mask here is the 32-bit 0x7fffffff even though
			// AddressOfData is 64-bit — this mirrors pefile's behavior; verify
			// whether 0x7fffffffffffffff was intended.
			if thunk.AddressOfData&0x7fffffff > 0xffff {
				if !stringInSlice(AnoAddressOfDataBeyondLimits, pe.Anomalies) {
					pe.Anomalies = append(pe.Anomalies, AnoAddressOfDataBeyondLimits)
				}
			}
			// and if it looks like it should be an RVA
		} else {
			// keep track of the RVAs seen and store them to study their
			// properties. When certain non-standard features are detected
			// the parsing will be aborted
			_, ok := addressesOfData[thunk.AddressOfData]
			if ok {
				repeatedAddress++
			} else {
				addressesOfData[thunk.AddressOfData] = true
			}

			if thunk.AddressOfData > maxAddressOfData {
				maxAddressOfData = thunk.AddressOfData
			}

			if thunk.AddressOfData < minAddressOfData {
				minAddressOfData = thunk.AddressOfData
			}
		}

		thunkTable[rva] = &thunk
		thunkData := ThunkData64{ImageThunkData: thunk, Offset: rva}
		retVal = append(retVal, thunkData)
		rva += size
	}
	return retVal, nil
}

// parseImports32 resolves the imported functions referenced by a 32-bit
// import (or delay-import) descriptor, reading both the import lookup table
// and the import address table.
func (pe *File) parseImports32(importDesc interface{}, maxLen uint32) (
	[]ImportFunction, error) {

	var OriginalFirstThunk uint32
	var FirstThunk uint32
	var isOldDelayImport bool

	// Accepts either a regular or a delay-load descriptor; an Attributes value
	// of zero marks the old VC6-style delay descriptor (VA-based fields).
	switch desc := importDesc.(type) {
	case *ImageImportDescriptor:
		OriginalFirstThunk = desc.OriginalFirstThunk
		FirstThunk = desc.FirstThunk
	case *ImageDelayImportDescriptor:
		OriginalFirstThunk = desc.ImportNameTableRVA
		FirstThunk = desc.ImportAddressTableRVA
		if desc.Attributes == 0 {
			isOldDelayImport = true
		}
	}

	// Import Lookup Table (OFT). Contains ordinals or pointers to strings.
ilt, err := pe.getImportTable32(OriginalFirstThunk, maxLen, isOldDelayImport)
	if err != nil {
		return nil, err
	}

	// Import Address Table (FT). May have identical content to ILT if PE file is
	// not bound. It will contain the address of the imported symbols once
	// the binary is loaded or if it is already bound.
	iat, err := pe.getImportTable32(FirstThunk, maxLen, isOldDelayImport)
	if err != nil {
		return nil, err
	}

	// Some DLLs has IAT or ILT with nil type.
	if len(iat) == 0 && len(ilt) == 0 {
		return nil, ErrDamagedImportTable
	}

	// Prefer the ILT when present; fall back to the IAT.
	var table []ThunkData32
	if len(ilt) > 0 {
		table = ilt
	} else if len(iat) > 0 {
		table = iat
	} else {
		// NOTE(review): unreachable — the emptiness check above already
		// guarantees one of the two tables is non-empty.
		return nil, err
	}

	importedFunctions := []ImportFunction{}
	numInvalid := uint32(0)
	for idx := uint32(0); idx < uint32(len(table)); idx++ {
		imp := ImportFunction{}
		if table[idx].ImageThunkData.AddressOfData > 0 {

			// If imported by ordinal, we will append the ordinal number
			if table[idx].ImageThunkData.AddressOfData&imageOrdinalFlag32 > 0 {
				imp.ByOrdinal = true
				// The ordinal is the low 16 bits of the thunk value.
				imp.Ordinal = table[idx].ImageThunkData.AddressOfData & uint32(0xffff)

				// Original Thunk
				if uint32(len(ilt)) > idx {
					imp.OriginalThunkValue =
						uint64(ilt[idx].ImageThunkData.AddressOfData)
					imp.OriginalThunkRVA = ilt[idx].Offset
				}

				// Thunk
				if uint32(len(iat)) > idx {
					imp.ThunkValue = uint64(iat[idx].ImageThunkData.AddressOfData)
					imp.ThunkRVA = iat[idx].Offset
				}

				// Synthetic "#<ordinal>" name, matching common tooling.
				imp.Name = "#" + strconv.Itoa(int(imp.Ordinal))

			} else {
				// Import by name: AddressOfData is the RVA of a hint/name entry.
				imp.ByOrdinal = false

				if isOldDelayImport {
					// Old delay descriptors store VAs; rebase to an RVA.
					table[idx].ImageThunkData.AddressOfData -=
						pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
				}

				// Original Thunk
				if uint32(len(ilt)) > idx {
					imp.OriginalThunkValue =
						uint64(ilt[idx].ImageThunkData.AddressOfData & addressMask32)
					imp.OriginalThunkRVA = ilt[idx].Offset
				}

				// Thunk
				if uint32(len(iat)) > idx {
					imp.ThunkValue =
						uint64(iat[idx].ImageThunkData.AddressOfData & addressMask32)
					imp.ThunkRVA = iat[idx].Offset
				}

				// Thunk
				hintNameTableRva :=
					table[idx].ImageThunkData.AddressOfData & addressMask32
				off := pe.GetOffsetFromRva(hintNameTableRva)
				imp.Hint, err = pe.ReadUint16(off)
				if err != nil {
					// Unreadable hint: mark with the invalid-hint sentinel.
					imp.Hint = ^uint16(0)
				}
				// The function name follows the 2-byte hint.
				imp.Name = pe.getStringAtRVA(
					table[idx].ImageThunkData.AddressOfData+2, maxImportNameLength)
				if !IsValidFunctionName(imp.Name) {
					imp.Name = "*invalid*"
				}
			}
		}

		// This file bfe97192e8107d52dd7b4010d12b2924 has an invalid table built
		// in a way that it's parsable but contains invalid entries that lead
		// pefile to take extremely long amounts of time to parse. It also leads
		// to extreme memory consumption. To prevent similar cases, if invalid
		// entries are found in the middle of a table the parsing will be aborted.
		hasName := len(imp.Name) > 0
		if imp.Ordinal == 0 && !hasName {
			if !stringInSlice(AnoImportNoNameNoOrdinal, pe.Anomalies) {
				pe.Anomalies = append(pe.Anomalies, AnoImportNoNameNoOrdinal)
			}
		}
		// Some PEs appear to interleave valid and invalid imports. Instead of
		// aborting the parsing altogether we will simply skip the invalid entries.
		// Although if we see 1000 invalid entries and no legit ones, we abort.
		if imp.Name == "*invalid*" {
			if numInvalid > 1000 && numInvalid == idx {
				return nil, errors.New(
					`too many invalid names, aborting parsing`)
			}
			numInvalid++
			continue
		}

		importedFunctions = append(importedFunctions, imp)
	}

	return importedFunctions, nil
}

// parseImports64 resolves the imported functions referenced by a 64-bit
// import (or delay-import) descriptor, reading both the import lookup table
// and the import address table.
func (pe *File) parseImports64(importDesc interface{}, maxLen uint32) ([]ImportFunction, error) {

	var OriginalFirstThunk uint32
	var FirstThunk uint32
	var isOldDelayImport bool

	// Accepts either a regular or a delay-load descriptor; an Attributes value
	// of zero marks the old VC6-style delay descriptor (VA-based fields).
	switch desc := importDesc.(type) {
	case *ImageImportDescriptor:
		OriginalFirstThunk = desc.OriginalFirstThunk
		FirstThunk = desc.FirstThunk
	case *ImageDelayImportDescriptor:
		OriginalFirstThunk = desc.ImportNameTableRVA
		FirstThunk = desc.ImportAddressTableRVA
		if desc.Attributes == 0 {
			isOldDelayImport = true
		}
	}

	// Import Lookup Table. Contains ordinals or pointers to strings.
	ilt, err := pe.getImportTable64(OriginalFirstThunk, maxLen, isOldDelayImport)
	if err != nil {
		return nil, err
	}

	// Import Address Table. 
May have identical content to ILT if PE file is // not bound. It will contain the address of the imported symbols once // the binary is loaded or if it is already bound. iat, err := pe.getImportTable64(FirstThunk, maxLen, isOldDelayImport) if err != nil { return nil, err } // Would crash if IAT or ILT had nil type if len(iat) == 0 && len(ilt) == 0 { return nil, ErrDamagedImportTable } var table []ThunkData64 if len(ilt) > 0 { table = ilt } else if len(iat) > 0 { table = iat } else { return nil, err } importedFunctions := []ImportFunction{} numInvalid := uint32(0) for idx := uint32(0); idx < uint32(len(table)); idx++ { imp := ImportFunction{} if table[idx].ImageThunkData.AddressOfData > 0 { // If imported by ordinal, we will append the ordinal number if table[idx].ImageThunkData.AddressOfData&imageOrdinalFlag64 > 0 { imp.ByOrdinal = true imp.Ordinal = uint32(table[idx].ImageThunkData.AddressOfData) & uint32(0xffff) // Original Thunk if uint32(len(ilt)) > idx { imp.OriginalThunkValue = ilt[idx].ImageThunkData.AddressOfData imp.OriginalThunkRVA = ilt[idx].Offset } // Thunk if uint32(len(iat)) > idx { imp.ThunkValue = iat[idx].ImageThunkData.AddressOfData imp.ThunkRVA = iat[idx].Offset } imp.Name = "#" + strconv.Itoa(int(imp.Ordinal)) } else { imp.ByOrdinal = false if isOldDelayImport { table[idx].ImageThunkData.AddressOfData -= pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase } // Original Thunk if uint32(len(ilt)) > idx { imp.OriginalThunkValue = ilt[idx].ImageThunkData.AddressOfData & addressMask64 imp.OriginalThunkRVA = ilt[idx].Offset } // Thunk if uint32(len(iat)) > idx { imp.ThunkValue = iat[idx].ImageThunkData.AddressOfData & addressMask64 imp.ThunkRVA = iat[idx].Offset } hintNameTableRva := table[idx].ImageThunkData.AddressOfData & addressMask64 off := pe.GetOffsetFromRva(uint32(hintNameTableRva)) imp.Hint = binary.LittleEndian.Uint16(pe.data[off:]) imp.Name = pe.getStringAtRVA(uint32(table[idx].ImageThunkData.AddressOfData+2), maxImportNameLength) 
if !IsValidFunctionName(imp.Name) { imp.Name = "*invalid*" } } } // This file bfe97192e8107d52dd7b4010d12b2924 has an invalid table built // in a way that it's parsable but contains invalid entries that lead // pefile to take extremely long amounts of time to parse. It also leads // to extreme memory consumption. To prevent similar cases, if invalid // entries are found in the middle of a table the parsing will be aborted. hasName := len(imp.Name) > 0 if imp.Ordinal == 0 && !hasName { if !stringInSlice(AnoImportNoNameNoOrdinal, pe.Anomalies) { pe.Anomalies = append(pe.Anomalies, AnoImportNoNameNoOrdinal) } } // Some PEs appear to interleave valid and invalid imports. Instead of // aborting the parsing altogether we will simply skip the invalid entries. // Although if we see 1000 invalid entries and no legit ones, we abort. if imp.Name == "*invalid*" { if numInvalid > 1000 && numInvalid == idx { return nil, errors.New( `too many invalid names, aborting parsing`) } numInvalid++ continue } importedFunctions = append(importedFunctions, imp) } return importedFunctions, nil } // GetImportEntryInfoByRVA return an import function + index of the entry given // an RVA. func (pe *File) GetImportEntryInfoByRVA(rva uint32) (Import, int) { for _, imp := range pe.Imports { for i, entry := range imp.Functions { if entry.ThunkRVA == rva { return imp, i } } } return Import{}, 0 } // md5hash hashes using md5 algorithm. func md5hash(text string) string { h := md5.New() h.Write([]byte(text)) return hex.EncodeToString(h.Sum(nil)) } // ImpHash calculates the import hash. // Algorithm: // Resolving ordinals to function names when they appear // Converting both DLL names and function names to all lowercase // Removing the file extensions from imported module names // Building and storing the lowercased string . 
in an ordered list // Generating the MD5 hash of the ordered list func (pe *File) ImpHash() (string, error) { if len(pe.Imports) == 0 { return "", errors.New("no imports found") } extensions := []string{"ocx", "sys", "dll"} var impStrs []string for _, imp := range pe.Imports { var libName string parts := strings.Split(imp.Name, ".") if len(parts) == 2 && stringInSlice(strings.ToLower(parts[1]), extensions) { libName = parts[0] } else { libName = imp.Name } libName = strings.ToLower(libName) for _, function := range imp.Functions { var funcName string if function.ByOrdinal { funcName = OrdLookup(imp.Name, uint64(function.Ordinal), true) } else { funcName = function.Name } if funcName == "" { continue } impStr := fmt.Sprintf("%s.%s", libName, strings.ToLower(funcName)) impStrs = append(impStrs, impStr) } } hash := md5hash(strings.Join(impStrs, ",")) return hash, nil } ================================================ FILE: imports_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"reflect"
	"testing"
)

// TestImportEntry describes one golden expectation for the import parser:
// the total number of import descriptors and one fully-specified entry at a
// given index.
type TestImportEntry struct {
	entryCount int
	entryIndex int
	entry      Import
}

func TestImportDirectory(t *testing.T) {

	tests := []struct {
		in  string
		out TestImportEntry
	}{
		{
			getAbsoluteFilePath("test/kernel32.dll"),
			TestImportEntry{
				entryCount: 96,
				entryIndex: 34,
				entry: Import{
					Offset: 0xa6d94,
					Name:   "api-ms-win-core-namedpipe-l1-2-1.dll",
					Descriptor: ImageImportDescriptor{
						OriginalFirstThunk: 0xa9a38,
						TimeDateStamp:      0x0,
						ForwarderChain:     0x0,
						Name:               0xaeeb8,
						FirstThunk:         0x82978,
					},
					Functions: []ImportFunction{
						{
							Name:               "GetNamedPipeHandleStateW",
							Hint:               0x6,
							ByOrdinal:          false,
							Ordinal:            0x0,
							OriginalThunkValue: 0xaee00,
							ThunkValue:         0xaee00,
							ThunkRVA:           0x82978,
							OriginalThunkRVA:   0xa9a38,
						},
					},
				},
			},
		},
		{
			// Import-by-ordinal sample: the single function is referenced by
			// ordinal 0x23 and carries the synthetic "#35" name.
			getAbsoluteFilePath("test/impbyord.exe"),
			TestImportEntry{
				entryCount: 2,
				entryIndex: 1,
				entry: Import{
					Offset: 0x284,
					Name:   "impbyord.exe",
					Descriptor: ImageImportDescriptor{
						OriginalFirstThunk: 0x10b4,
						TimeDateStamp:      0x0,
						ForwarderChain:     0x0,
						Name:               0x10d0,
						FirstThunk:         0x1058,
					},
					Functions: []ImportFunction{
						{
							Name:               "#35",
							Hint:               0x0,
							ByOrdinal:          true,
							Ordinal:            0x23,
							OriginalThunkValue: 0x80000023,
							ThunkValue:         0x10b4,
							ThunkRVA:           0x1058,
							OriginalThunkRVA:   0x10b4,
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			// Locate the import data directory in the 32/64-bit optional header.
			var va, size uint32
			if file.Is64 {
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryImport]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			} else {
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryImport]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}

			err = file.parseImportDirectory(va, size)
			if err != nil {
				t.Fatalf("parseImportDirectory(%s) failed, reason: %v", tt.in, err)
			}
			got := file.Imports
			if len(got) != tt.out.entryCount {
				t.Errorf("imports entry count assertion failed, got %v, want %v",
					len(got), tt.out.entryCount)
			}

			impFunc := file.Imports[tt.out.entryIndex]
			if !reflect.DeepEqual(impFunc, tt.out.entry) {
				t.Errorf("import function entry assertion failed, got %v, want %v",
					impFunc, tt.out.entry)
			}
		})
	}
}

// TestImpHash checks the import hash against values produced by the reference
// pefile implementation for known samples.
func TestImpHash(t *testing.T) {

	for _, tt := range []struct {
		in  string
		out string
	}{
		{getAbsoluteFilePath("test/putty.exe"),
			"2e3215acc61253e5fa73a840384e9720"},
		{getAbsoluteFilePath("test/01008963d32f5cc17b64c31446386ee5b36a7eab6761df87a2989ba9394d8f3d"),
			"431cb9bbc479c64cb0d873043f4de547"},
		{getAbsoluteFilePath("test/0103daa751660333b7ae5f098795df58f07e3031563e042d2eb415bffa71fe7a"),
			"8b58a51c1fff9c4a944265c1fe0fab74"},
		{getAbsoluteFilePath("test/0585495341e0ffaae1734acb78708ff55cd3612d844672d37226ef63d12652d0"),
			"e4290fa6afc89d56616f34ebbd0b1f2c"},
	} {
		t.Run(tt.in, func(t *testing.T) {
			file, err := New(tt.in, &Options{})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			if err := file.Parse(); err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}
			impHash, err := file.ImpHash()
			if err != nil {
				t.Fatalf("ImpHash(%s) failed, reason: %v", tt.in, err)
			}
			if impHash != tt.out {
				t.Errorf("ImpHash(%s) got %v, want %v", tt.in, impHash, tt.out)
			}
		})
	}
}

================================================
FILE: loadconfig.go
================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
// References:
// https://www.virtualbox.org/svn/vbox/trunk/include/iprt/formats/pecoff.h
// https://github.com/hdoc/llvm-project/blob/release/15.x/llvm/include/llvm/Object/COFF.h
// https://ffri.github.io/ProjectChameleon/new_reloc_chpev2/
// https://blogs.blackberry.com/en/2019/09/teardown-windows-10-on-arm-x86-emulation
// DVRT: https://www.alex-ionescu.com/?p=323
// https://xlab.tencent.com/en/2016/11/02/return-flow-guard/
// https://denuvosoftwaresolutions.github.io/DVRT/dvrt.html
// BlueHat v18 || Retpoline: The Anti spectre type 2 mitigation in windows: https://www.youtube.com/watch?v=ZfxXjDQRpsU

package pe

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"reflect"
)

// ImageGuardFlagType represents the type for load configuration image guard flags.
type ImageGuardFlagType uint8

// GFIDS table entry flags.
const (
	// ImageGuardFlagFIDSuppressed indicates that the call target is explicitly
	// suppressed (do not treat it as valid for purposes of CFG).
	ImageGuardFlagFIDSuppressed = 0x1

	// ImageGuardFlagExportSuppressed indicates that the call target is export
	// suppressed. See Export suppression for more details.
	ImageGuardFlagExportSuppressed = 0x2
)

// The GuardFlags field contains a combination of one or more of the
// following flags and subfields:
const (
	// ImageGuardCfInstrumented indicates that the module performs control flow
	// integrity checks using system-supplied support.
	ImageGuardCfInstrumented = 0x00000100

	// ImageGuardCfWInstrumented indicates that the module performs control
	// flow and write integrity checks.
	ImageGuardCfWInstrumented = 0x00000200

	// ImageGuardCfFunctionTablePresent indicates that the module contains
	// valid control flow target metadata.
	ImageGuardCfFunctionTablePresent = 0x00000400

	// ImageGuardSecurityCookieUnused indicates that the module does not make
	// use of the /GS security cookie.
	ImageGuardSecurityCookieUnused = 0x00000800

	// ImageGuardProtectDelayLoadIAT indicates that the module supports read
	// only delay load IAT.
	ImageGuardProtectDelayLoadIAT = 0x00001000

	// ImageGuardDelayLoadIATInItsOwnSection indicates that the Delayload
	// import table in its own .didat section (with nothing else in it) that
	// can be freely reprotected.
	ImageGuardDelayLoadIATInItsOwnSection = 0x00002000

	// ImageGuardCfExportSuppressionInfoPresent indicates that the module
	// contains suppressed export information. This also infers that the
	// address taken IAT table is also present in the load config.
	ImageGuardCfExportSuppressionInfoPresent = 0x00004000

	// ImageGuardCfEnableExportSuppression indicates that the module enables
	// suppression of exports.
	ImageGuardCfEnableExportSuppression = 0x00008000

	// ImageGuardCfLongJumpTablePresent indicates that the module contains
	// long jmp target information.
	ImageGuardCfLongJumpTablePresent = 0x00010000
)

const (
	// ImageGuardCfFunctionTableSizeMask indicates that the mask for the
	// subfield that contains the stride of Control Flow Guard function table
	// entries (that is, the additional count of bytes per table entry).
	ImageGuardCfFunctionTableSizeMask = 0xF0000000

	// ImageGuardCfFunctionTableSizeShift indicates the shift to right-justify
	// Guard CF function table stride.
	ImageGuardCfFunctionTableSizeShift = 28
)

// DVRT retpoline relocation kinds (RF prologue / epilogue).
// NOTE(review): "GuardREpilogue" looks like a casing slip for
// "GuardRfEpilogue", but the name is exported and cannot be renamed without
// breaking API compatibility.
const (
	ImageDynamicRelocationGuardRfPrologue = 0x00000001
	ImageDynamicRelocationGuardREpilogue  = 0x00000002
)

// Software enclave information.
const (
	ImageEnclaveLongIDLength  = 32
	ImageEnclaveShortIDLength = 16
)

const (
	// ImageEnclaveImportMatchNone indicates that none of the identifiers of the
	// image need to match the value in the import record.
	ImageEnclaveImportMatchNone = 0x00000000

	// ImageEnclaveImportMatchUniqueId indicates that the value of the enclave
	// unique identifier of the image must match the value in the import record.
	// Otherwise, loading of the image fails.
	ImageEnclaveImportMatchUniqueID = 0x00000001

	// ImageEnclaveImportMatchAuthorId indicates that the value of the enclave
	// author identifier of the image must match the value in the import record.
	// Otherwise, loading of the image fails. If this flag is set and the import
	// record indicates an author identifier of all zeros, the imported image
	// must be part of the Windows installation.
	ImageEnclaveImportMatchAuthorID = 0x00000002

	// ImageEnclaveImportMatchFamilyId indicates that the value of the enclave
	// family identifier of the image must match the value in the import record.
	// Otherwise, loading of the image fails.
	ImageEnclaveImportMatchFamilyID = 0x00000003

	// ImageEnclaveImportMatchImageId indicates that the value of the enclave
	// image identifier must match the value in the import record. Otherwise,
	// loading of the image fails.
	ImageEnclaveImportMatchImageID = 0x00000004
)

// ImageLoadConfigDirectory32 Contains the load configuration data of an image for x86 binaries.
type ImageLoadConfigDirectory32 struct {
	// The actual size of the structure inclusive. May differ from the size
	// given in the data directory for Windows XP and earlier compatibility.
	Size uint32 `json:"size"`

	// Date and time stamp value.
	TimeDateStamp uint32 `json:"time_date_stamp"`

	// Major version number.
	MajorVersion uint16 `json:"major_version"`

	// Minor version number.
	MinorVersion uint16 `json:"minor_version"`

	// The global loader flags to clear for this process as the loader starts
	// the process.
	GlobalFlagsClear uint32 `json:"global_flags_clear"`

	// The global loader flags to set for this process as the loader starts the
	// process.
	GlobalFlagsSet uint32 `json:"global_flags_set"`

	// The default timeout value to use for this process's critical sections
	// that are abandoned.
	CriticalSectionDefaultTimeout uint32 `json:"critical_section_default_timeout"`

	// Memory that must be freed before it is returned to the system, in bytes.
DeCommitFreeBlockThreshold uint32 `json:"de_commit_free_block_threshold"`

	// Total amount of free memory, in bytes.
	DeCommitTotalFreeThreshold uint32 `json:"de_commit_total_free_threshold"`

	// [x86 only] The VA of a list of addresses where the LOCK prefix is used so
	// that they can be replaced with NOP on single processor machines.
	LockPrefixTable uint32 `json:"lock_prefix_table"`

	// Maximum allocation size, in bytes.
	MaximumAllocationSize uint32 `json:"maximum_allocation_size"`

	// Maximum virtual memory size, in bytes.
	VirtualMemoryThreshold uint32 `json:"virtual_memory_threshold"`

	// Process heap flags that correspond to the first argument of the HeapCreate
	// function. These flags apply to the process heap that is created during
	// process startup.
	ProcessHeapFlags uint32 `json:"process_heap_flags"`

	// Setting this field to a non-zero value is equivalent to calling
	// SetProcessAffinityMask with this value during process startup (.exe only)
	ProcessAffinityMask uint32 `json:"process_affinity_mask"`

	// The service pack version identifier.
	CSDVersion uint16 `json:"csd_version"`

	// Must be zero.
	DependentLoadFlags uint16 `json:"dependent_load_flags"`

	// Reserved for use by the system.
	EditList uint32 `json:"edit_list"`

	// A pointer to a cookie that is used by Visual C++ or GS implementation.
	SecurityCookie uint32 `json:"security_cookie"`

	// [x86 only] The VA of the sorted table of RVAs of each valid, unique SE
	// handler in the image.
	SEHandlerTable uint32 `json:"se_handler_table"`

	// [x86 only] The count of unique handlers in the table.
	SEHandlerCount uint32 `json:"se_handler_count"`

	// The VA where Control Flow Guard check-function pointer is stored.
	GuardCFCheckFunctionPointer uint32 `json:"guard_cf_check_function_pointer"`

	// The VA where Control Flow Guard dispatch-function pointer is stored.
	GuardCFDispatchFunctionPointer uint32 `json:"guard_cf_dispatch_function_pointer"`

	// The VA of the sorted table of RVAs of each Control Flow Guard function in
	// the image.
	GuardCFFunctionTable uint32 `json:"guard_cf_function_table"`

	// The count of unique RVAs in the above table.
	GuardCFFunctionCount uint32 `json:"guard_cf_function_count"`

	// Control Flow Guard related flags.
	GuardFlags uint32 `json:"guard_flags"`

	// Code integrity information.
	CodeIntegrity ImageLoadConfigCodeIntegrity `json:"code_integrity"`

	// The VA where Control Flow Guard address taken IAT table is stored.
	GuardAddressTakenIATEntryTable uint32 `json:"guard_address_taken_iat_entry_table"`

	// The count of unique RVAs in the above table.
	GuardAddressTakenIATEntryCount uint32 `json:"guard_address_taken_iat_entry_count"`

	// The VA where Control Flow Guard long jump target table is stored.
	GuardLongJumpTargetTable uint32 `json:"guard_long_jump_target_table"`

	// The count of unique RVAs in the above table.
	GuardLongJumpTargetCount uint32 `json:"guard_long_jump_target_count"`

	DynamicValueRelocTable uint32 `json:"dynamic_value_reloc_table"`

	// Not sure when this was renamed from HybridMetadataPointer.
	CHPEMetadataPointer uint32 `json:"chpe_metadata_pointer"`

	GuardRFFailureRoutine                    uint32 `json:"guard_rf_failure_routine"`
	GuardRFFailureRoutineFunctionPointer     uint32 `json:"guard_rf_failure_routine_function_pointer"`
	DynamicValueRelocTableOffset             uint32 `json:"dynamic_value_reloc_table_offset"`
	DynamicValueRelocTableSection            uint16 `json:"dynamic_value_reloc_table_section"`
	Reserved2                                uint16 `json:"reserved_2"`
	GuardRFVerifyStackPointerFunctionPointer uint32 `json:"guard_rf_verify_stack_pointer_function_pointer"`
	HotPatchTableOffset                      uint32 `json:"hot_patch_table_offset"`
	Reserved3                                uint32 `json:"reserved_3"`
	EnclaveConfigurationPointer              uint32 `json:"enclave_configuration_pointer"`
	VolatileMetadataPointer                  uint32 `json:"volatile_metadata_pointer"`
	GuardEHContinuationTable                 uint32 `json:"guard_eh_continuation_table"`
	GuardEHContinuationCount                 uint32 `json:"guard_eh_continuation_count"`
	GuardXFGCheckFunctionPointer             uint32 `json:"guard_xfg_check_function_pointer"`
	GuardXFGDispatchFunctionPointer          uint32 `json:"guard_xfg_dispatch_function_pointer"`
	GuardXFGTableDispatchFunctionPointer     uint32 `json:"guard_xfg_table_dispatch_function_pointer"`
	CastGuardOSDeterminedFailureMode         uint32 `json:"cast_guard_os_determined_failure_mode"`
	GuardMemcpyFunctionPointer               uint32 `json:"guard_memcpy_function_pointer"`
}

// ImageLoadConfigDirectory64 Contains the load configuration data of an image for x64 binaries.
type ImageLoadConfigDirectory64 struct {
	// The actual size of the structure inclusive. May differ from the size
	// given in the data directory for Windows XP and earlier compatibility.
	Size uint32 `json:"size"`

	// Date and time stamp value.
	TimeDateStamp uint32 `json:"time_date_stamp"`

	// Major version number.
	MajorVersion uint16 `json:"major_version"`

	// Minor version number.
	MinorVersion uint16 `json:"minor_version"`

	// The global loader flags to clear for this process as the loader starts
	// the process.
GlobalFlagsClear uint32 `json:"global_flags_clear"`

	// The global loader flags to set for this process as the loader starts the
	// process.
	GlobalFlagsSet uint32 `json:"global_flags_set"`

	// The default timeout value to use for this process's critical sections
	// that are abandoned.
	CriticalSectionDefaultTimeout uint32 `json:"critical_section_default_timeout"`

	// Memory that must be freed before it is returned to the system, in bytes.
	DeCommitFreeBlockThreshold uint64 `json:"de_commit_free_block_threshold"`

	// Total amount of free memory, in bytes.
	DeCommitTotalFreeThreshold uint64 `json:"de_commit_total_free_threshold"`

	// [x86 only] The VA of a list of addresses where the LOCK prefix is used so
	// that they can be replaced with NOP on single processor machines.
	LockPrefixTable uint64 `json:"lock_prefix_table"`

	// Maximum allocation size, in bytes.
	MaximumAllocationSize uint64 `json:"maximum_allocation_size"`

	// Maximum virtual memory size, in bytes.
	VirtualMemoryThreshold uint64 `json:"virtual_memory_threshold"`

	// Setting this field to a non-zero value is equivalent to calling
	// SetProcessAffinityMask with this value during process startup (.exe only)
	ProcessAffinityMask uint64 `json:"process_affinity_mask"`

	// Process heap flags that correspond to the first argument of the HeapCreate
	// function. These flags apply to the process heap that is created during
	// process startup.
	ProcessHeapFlags uint32 `json:"process_heap_flags"`

	// The service pack version identifier.
	CSDVersion uint16 `json:"csd_version"`

	// Must be zero.
	DependentLoadFlags uint16 `json:"dependent_load_flags"`

	// Reserved for use by the system.
	EditList uint64 `json:"edit_list"`

	// A pointer to a cookie that is used by Visual C++ or GS implementation.
	SecurityCookie uint64 `json:"security_cookie"`

	// [x86 only] The VA of the sorted table of RVAs of each valid, unique SE
	// handler in the image.
	SEHandlerTable uint64 `json:"se_handler_table"`

	// [x86 only] The count of unique handlers in the table.
	SEHandlerCount uint64 `json:"se_handler_count"`

	// The VA where Control Flow Guard check-function pointer is stored.
	GuardCFCheckFunctionPointer uint64 `json:"guard_cf_check_function_pointer"`

	// The VA where Control Flow Guard dispatch-function pointer is stored.
	GuardCFDispatchFunctionPointer uint64 `json:"guard_cf_dispatch_function_pointer"`

	// The VA of the sorted table of RVAs of each Control Flow Guard function in
	// the image.
	GuardCFFunctionTable uint64 `json:"guard_cf_function_table"`

	// The count of unique RVAs in the above table.
	GuardCFFunctionCount uint64 `json:"guard_cf_function_count"`

	// Control Flow Guard related flags.
	GuardFlags uint32 `json:"guard_flags"`

	// Code integrity information.
	CodeIntegrity ImageLoadConfigCodeIntegrity `json:"code_integrity"`

	// The VA where Control Flow Guard address taken IAT table is stored.
	GuardAddressTakenIATEntryTable uint64 `json:"guard_address_taken_iat_entry_table"`

	// The count of unique RVAs in the above table.
	GuardAddressTakenIATEntryCount uint64 `json:"guard_address_taken_iat_entry_count"`

	// The VA where Control Flow Guard long jump target table is stored.
	GuardLongJumpTargetTable uint64 `json:"guard_long_jump_target_table"`

	// The count of unique RVAs in the above table.
	GuardLongJumpTargetCount uint64 `json:"guard_long_jump_target_count"`

	DynamicValueRelocTable uint64 `json:"dynamic_value_reloc_table"`

	// Not sure when this was renamed from HybridMetadataPointer.
	CHPEMetadataPointer uint64 `json:"chpe_metadata_pointer"`

	GuardRFFailureRoutine                    uint64 `json:"guard_rf_failure_routine"`
	GuardRFFailureRoutineFunctionPointer     uint64 `json:"guard_rf_failure_routine_function_pointer"`
	DynamicValueRelocTableOffset             uint32 `json:"dynamic_value_reloc_table_offset"`
	DynamicValueRelocTableSection            uint16 `json:"dynamic_value_reloc_table_section"`
	Reserved2                                uint16 `json:"reserved_2"`
	GuardRFVerifyStackPointerFunctionPointer uint64 `json:"guard_rf_verify_stack_pointer_function_pointer"`
	HotPatchTableOffset                      uint32 `json:"hot_patch_table_offset"`
	Reserved3                                uint32 `json:"reserved_3"`
	EnclaveConfigurationPointer              uint64 `json:"enclave_configuration_pointer"`
	VolatileMetadataPointer                  uint64 `json:"volatile_metadata_pointer"`
	GuardEHContinuationTable                 uint64 `json:"guard_eh_continuation_table"`
	GuardEHContinuationCount                 uint64 `json:"guard_eh_continuation_count"`
	GuardXFGCheckFunctionPointer             uint64 `json:"guard_xfg_check_function_pointer"`
	GuardXFGDispatchFunctionPointer          uint64 `json:"guard_xfg_dispatch_function_pointer"`
	GuardXFGTableDispatchFunctionPointer     uint64 `json:"guard_xfg_table_dispatch_function_pointer"`
	CastGuardOSDeterminedFailureMode         uint64 `json:"cast_guard_os_determined_failure_mode"`
	GuardMemcpyFunctionPointer               uint64 `json:"guard_memcpy_function_pointer"`
}

// ImageCHPEMetadataX86 represents the X86_IMAGE_CHPE_METADATA_X86.
type ImageCHPEMetadataX86 struct {
	// Structure version; determines how many trailing fields are valid
	// (see the Version switch in getHybridPE below).
	Version uint32 `json:"version"`
	// RVA of the array of IMAGE_CHPE_RANGE_ENTRY code ranges.
	CHPECodeAddressRangeOffset uint32 `json:"chpe_code_address_range_offset"`
	// Number of entries in the code-range array.
	CHPECodeAddressRangeCount                uint32 `json:"chpe_code_address_range_count"`
	WoWA64ExceptionHandlerFunctionPtr        uint32 `json:"wow_a64_exception_handler_function_ptr"`
	WoWA64DispatchCallFunctionPtr            uint32 `json:"wow_a64_dispatch_call_function_ptr"`
	WoWA64DispatchIndirectCallFunctionPtr    uint32 `json:"wow_a64_dispatch_indirect_call_function_ptr"`
	WoWA64DispatchIndirectCallCfgFunctionPtr uint32 `json:"wow_a64_dispatch_indirect_call_cfg_function_ptr"`
	WoWA64DispatchRetFunctionPtr             uint32 `json:"wow_a64_dispatch_ret_function_ptr"`
	WoWA64DispatchRetLeafFunctionPtr         uint32 `json:"wow_a64_dispatch_ret_leaf_function_ptr"`
	WoWA64DispatchJumpFunctionPtr            uint32 `json:"wow_a64_dispatch_jump_function_ptr"`
	CompilerIATPointer                       uint32 `json:"compiler_iat_pointer"` // Present if Version >= 2
	WoWA64RDTSCFunctionPtr                   uint32 `json:"wow_a64_rdtsc_function_ptr"` // Present if Version >= 3
}

// CodeRange describes one CHPE code address range (decoded form of
// IMAGE_CHPE_RANGE_ENTRY: low bit of the start offset selects the machine).
type CodeRange struct {
	Begin   uint32 `json:"begin"`
	Length  uint32 `json:"length"`
	Machine uint8  `json:"machine"`
}

// CompilerIAT is one decoded entry of the CHPE compiler IAT, annotated with
// the import it resolves to (when one is found).
type CompilerIAT struct {
	RVA         uint32 `json:"rva"`
	Value       uint32 `json:"value"`
	Description string `json:"description"`
}

// HybridPE aggregates the parsed CHPE (Compiled Hybrid PE) metadata.
type HybridPE struct {
	// Currently always an ImageCHPEMetadataX86; declared as interface{} to
	// allow other metadata layouts.
	CHPEMetadata interface{}   `json:"chpe_metadata"`
	CodeRanges   []CodeRange   `json:"code_ranges"`
	CompilerIAT  []CompilerIAT `json:"compiler_iat"`
}

// ImageDynamicRelocationTable represents the DVRT header.
type ImageDynamicRelocationTable struct {
	// Until now, there is only one version of the DVRT header (1)..
	Version uint32 `json:"version"`
	// Size represents the number of bytes after the header that contains
	// retpoline information.
	Size uint32 `json:"size"`
	// IMAGE_DYNAMIC_RELOCATION DynamicRelocations[0];
}

// Dynamic value relocation entries following IMAGE_DYNAMIC_RELOCATION_TABLE.
// Each block starts with the header.

// ImageDynamicRelocation32 represents the 32-bit version of a reloc entry.
type ImageDynamicRelocation32 struct {
	// Symbol field identifies one of the existing types of dynamic relocations
	// so far (values 3, 4 and 5).
	Symbol uint32 `json:"symbol"`
	// Then, for each page, there is a block that starts with a relocation entry.
	// BaseRelocSize represents the size of the block.
	BaseRelocSize uint32 `json:"base_reloc_size"`
	// IMAGE_BASE_RELOCATION BaseRelocations[0];
}

// ImageDynamicRelocation64 represents the 64-bit version of a reloc entry.
type ImageDynamicRelocation64 struct {
	// Symbol field identifies one of the existing types of dynamic relocations
	// so far (values 3, 4 and 5).
	Symbol uint64 `json:"symbol"`
	// Then, for each page, there is a block that starts with a relocation entry.
	// BaseRelocSize represents the size of the block.
	BaseRelocSize uint32 `json:"base_reloc_size"`
	// IMAGE_BASE_RELOCATION BaseRelocations[0];
}

// ImageDynamicRelocation32v2 is the v2 (extended header) 32-bit reloc entry.
type ImageDynamicRelocation32v2 struct {
	HeaderSize    uint32 `json:"header_size"`
	FixupInfoSize uint32 `json:"fixup_info_size"`
	Symbol        uint32 `json:"symbol"`
	SymbolGroup   uint32 `json:"symbol_group"`
	Flags         uint32 `json:"flags"`
	// ... variable length header fields
	// UCHAR FixupInfo[FixupInfoSize]
}

// ImageDynamicRelocation64v2 is the v2 (extended header) 64-bit reloc entry.
type ImageDynamicRelocation64v2 struct {
	HeaderSize    uint32 `json:"header_size"`
	FixupInfoSize uint32 `json:"fixup_info_size"`
	Symbol        uint64 `json:"symbol"`
	SymbolGroup   uint32 `json:"symbol_group"`
	Flags         uint32 `json:"flags"`
	// ... variable length header fields
	// UCHAR FixupInfo[FixupInfoSize]
}

// ImagePrologueDynamicRelocationHeader precedes prologue fixup bytes.
type ImagePrologueDynamicRelocationHeader struct {
	PrologueByteCount uint8 `json:"prologue_byte_count"`
	// UCHAR PrologueBytes[PrologueByteCount];
}

// ImageEpilogueDynamicRelocationHeader precedes epilogue branch descriptors.
type ImageEpilogueDynamicRelocationHeader struct {
	EpilogueCount               uint32 `json:"epilogue_count"`
	EpilogueByteCount           uint8  `json:"epilogue_byte_count"`
	BranchDescriptorElementSize uint8  `json:"branch_descriptor_element_size"`
	BranchDescriptorCount       uint8  `json:"branch_descriptor_count"`
	// UCHAR BranchDescriptors[...];
	// UCHAR BranchDescriptorBitMap[...];
}

// CFGFunction is one decoded GFIDS (Guard CF function) table entry.
type CFGFunction struct {
	// RVA of the target CFG call.
	RVA uint32 `json:"rva"`
	// Flags attached to each GFIDS entry if any call targets have metadata.
	Flags       ImageGuardFlagType `json:"flags"`
	Description string             `json:"description"`
}

// CFGIATEntry is one decoded Guard address-taken IAT table entry, annotated
// with the import it resolves to.
type CFGIATEntry struct {
	RVA         uint32 `json:"rva"`
	IATValue    uint32 `json:"iat_value"`
	INTValue    uint32 `json:"int_value"`
	Description string `json:"description"`
}

// RelocBlock groups one base-relocation header with its decoded type offsets.
type RelocBlock struct {
	ImgBaseReloc ImageBaseRelocation `json:"img_base_reloc"`
	TypeOffsets  []interface{}       `json:"type_offsets"`
}

// RelocEntry is one DVRT entry: a per-symbol header plus its page blocks.
type RelocEntry struct {
	// Could be ImageDynamicRelocation32{} or ImageDynamicRelocation64{}
	ImageDynamicRelocation interface{}  `json:"image_dynamic_relocation"`
	RelocBlocks            []RelocBlock `json:"reloc_blocks"`
}

// ImageImportControlTransferDynamicRelocation represents the Imported Address
// Retpoline (type 3), size = 4 bytes.
type ImageImportControlTransferDynamicRelocation struct {
	PageRelativeOffset uint16 `json:"page_relative_offset"` // (12 bits)
	// 1 - the opcode is a CALL
	// 0 - the opcode is a JMP.
	IndirectCall uint16 `json:"indirect_call"` // (1 bit)
	IATIndex     uint32 `json:"iat_index"`     // (19 bits)
}

// ImageIndirectControlTransferDynamicRelocation represents the Indirect Branch
// Retpoline (type 4), size = 2 bytes.
type ImageIndirectControlTransferDynamicRelocation struct {
	PageRelativeOffset uint16 `json:"page_relative_offset"` // (12 bits)
	IndirectCall       uint8  `json:"indirect_call"`        // (1 bit)
	RexWPrefix         uint8  `json:"rex_w_prefix"`         // (1 bit)
	CfgCheck           uint8  `json:"cfg_check"`            // (1 bit)
	Reserved           uint8  `json:"reserved"`             // (1 bit)
}

// ImageSwitchableBranchDynamicRelocation represents the Switchable Retpoline
// (type 5), size = 2 bytes.
type ImageSwitchableBranchDynamicRelocation struct {
	PageRelativeOffset uint16 `json:"page_relative_offset"` // (12 bits)
	RegisterNumber     uint16 `json:"register_number"`      // (4 bits)
}

// DVRT represents the Dynamic Value Relocation Table.
// The DVRT was originally introduced back in the Windows 10 Creators Update to
// improve kernel address space layout randomization (KASLR). It allowed the
// memory manager’s page frame number (PFN) database and page table self-map to
// be assigned dynamic addresses at runtime. The DVRT is stored directly in the
// binary and contains a series of relocation entries for each symbol (i.e.
// address) that is to be relocated. The relocation entries are themselves
// arranged in a hierarchical fashion grouped first by symbol and then by
// containing page to allow for a compact description of all locations in the
// binary that reference a relocatable symbol.
// Reference: https://techcommunity.microsoft.com/t5/windows-os-platform-blog/mitigating-spectre-variant-2-with-retpoline-on-windows/ba-p/295618
type DVRT struct {
	ImageDynamicRelocationTable `json:"image_dynamic_relocation_table"`
	Entries                     []RelocEntry `json:"entries"`
}

// Enclave holds the parsed enclave configuration and its import list.
type Enclave struct {
	// Points to either ImageEnclaveConfig32{} or ImageEnclaveConfig64{}.
	Config  interface{}          `json:"config"`
	Imports []ImageEnclaveImport `json:"imports"`
}

// RangeTableEntry is one entry of the volatile-metadata info range table.
type RangeTableEntry struct {
	RVA  uint32 `json:"rva"`
	Size uint32 `json:"size"`
}

// VolatileMetadata holds the parsed volatile metadata directory.
type VolatileMetadata struct {
	Struct         ImageVolatileMetadata `json:"struct"`
	AccessRVATable []uint32              `json:"access_rva_table"`
	InfoRangeTable []RangeTableEntry     `json:"info_range_table"`
}

// LoadConfig is the top-level result of parsing the load-config directory.
// NOTE: the helper functions below access Struct's fields via reflection by
// positional index (v.Field(N)), so the field layout of the underlying
// ImageLoadConfigDirectory32/64 structs must never be reordered.
type LoadConfig struct {
	Struct           interface{}       `json:"struct"`
	SEH              []uint32          `json:"seh"`
	GFIDS            []CFGFunction     `json:"gfids"`
	CFGIAT           []CFGIATEntry     `json:"cfgiat"`
	CFGLongJump      []uint32          `json:"cfg_long_jump"`
	CHPE             *HybridPE         `json:"chpe"`
	DVRT             *DVRT             `json:"dvrt"`
	Enclave          *Enclave          `json:"enclave"`
	VolatileMetadata *VolatileMetadata `json:"volatile_metadata"`
}

// ImageLoadConfigCodeIntegrity Code Integrity in load config (CI).
type ImageLoadConfigCodeIntegrity struct {
	// Flags to indicate if CI information is available, etc.
	Flags uint16 `json:"flags"`
	// 0xFFFF means not available
	Catalog       uint16 `json:"catalog"`
	CatalogOffset uint32 `json:"catalog_offset"`
	// Additional bitmask to be defined later
	Reserved uint32 `json:"reserved"`
}

// ImageEnclaveConfig32 is the 32-bit enclave configuration
// (IMAGE_ENCLAVE_CONFIG32).
type ImageEnclaveConfig32 struct {
	// The size of the IMAGE_ENCLAVE_CONFIG32 structure, in bytes.
	Size uint32 `json:"size"`
	// The minimum size of the IMAGE_ENCLAVE_CONFIG32 structure that the image
	// loader must be able to process in order for the enclave to be usable.
	// This member allows an enclave to inform an earlier version of the image
	// loader that the image loader can safely load the enclave and ignore optional
	// members added to IMAGE_ENCLAVE_CONFIG32 for later versions of the enclave.
	// If the size of IMAGE_ENCLAVE_CONFIG32 that the image loader can process is
	// less than MinimumRequiredConfigSize, the enclave cannot be run securely.
	// If MinimumRequiredConfigSize is zero, the minimum size of the
	// IMAGE_ENCLAVE_CONFIG32 structure that the image loader must be able to
	// process in order for the enclave to be usable is assumed to be the size
	// of the structure through and including the MinimumRequiredConfigSize member.
	MinimumRequiredConfigSize uint32 `json:"minimum_required_config_size"`
	// A flag that indicates whether the enclave permits debugging.
	PolicyFlags uint32 `json:"policy_flags"`
	// The number of images in the array of images that the ImportList member
	// points to.
	NumberOfImports uint32 `json:"number_of_imports"`
	// The relative virtual address of the array of images that the enclave
	// image may import, with identity information for each image.
	ImportList uint32 `json:"import_list"`
	// The size of each image in the array of images that the ImportList member
	// points to.
	ImportEntrySize uint32 `json:"import_entry_size"`
	// The family identifier that the author of the enclave assigned to the enclave.
	FamilyID [ImageEnclaveShortIDLength]uint8 `json:"family_id"`
	// The image identifier that the author of the enclave assigned to the enclave.
	ImageID [ImageEnclaveShortIDLength]uint8 `json:"image_id"`
	// The version number that the author of the enclave assigned to the enclave.
	ImageVersion uint32 `json:"image_version"`
	// The security version number that the author of the enclave assigned to
	// the enclave.
	SecurityVersion uint32 `json:"security_version"`
	// The expected virtual size of the private address range for the enclave,
	// in bytes.
	EnclaveSize uint32 `json:"enclave_size"`
	// The maximum number of threads that can be created within the enclave.
	NumberOfThreads uint32 `json:"number_of_threads"`
	// A flag that indicates whether the image is suitable for use as the
	// primary image in the enclave.
	EnclaveFlags uint32 `json:"enclave_flags"`
}

// ImageEnclaveConfig64 is the 64-bit enclave configuration
// (IMAGE_ENCLAVE_CONFIG64); identical to the 32-bit layout except that
// EnclaveSize widens to uint64.
type ImageEnclaveConfig64 struct {
	// The size of the IMAGE_ENCLAVE_CONFIG32 structure, in bytes.
	Size uint32 `json:"size"`
	// The minimum size of the IMAGE_ENCLAVE_CONFIG32 structure that the image
	// loader must be able to process in order for the enclave to be usable.
	// This member allows an enclave to inform an earlier version of the image
	// loader that the image loader can safely load the enclave and ignore
	// optional members added to IMAGE_ENCLAVE_CONFIG32 for later versions of
	// the enclave.
	// If the size of IMAGE_ENCLAVE_CONFIG32 that the image loader can process
	// is less than MinimumRequiredConfigSize, the enclave cannot be run securely.
	// If MinimumRequiredConfigSize is zero, the minimum size of the
	// IMAGE_ENCLAVE_CONFIG32 structure that the image loader must be able to
	// process in order for the enclave to be usable is assumed to be the size
	// of the structure through and including the MinimumRequiredConfigSize member.
	MinimumRequiredConfigSize uint32 `json:"minimum_required_config_size"`
	// A flag that indicates whether the enclave permits debugging.
	PolicyFlags uint32 `json:"policy_flags"`
	// The number of images in the array of images that the ImportList member
	// points to.
	NumberOfImports uint32 `json:"number_of_imports"`
	// The relative virtual address of the array of images that the enclave
	// image may import, with identity information for each image.
	ImportList uint32 `json:"import_list"`
	// The size of each image in the array of images that the ImportList member
	// points to.
	ImportEntrySize uint32 `json:"import_entry_size"`
	// The family identifier that the author of the enclave assigned to the enclave.
	FamilyID [ImageEnclaveShortIDLength]uint8 `json:"family_id"`
	// The image identifier that the author of the enclave assigned to the enclave.
	ImageID [ImageEnclaveShortIDLength]uint8 `json:"image_id"`
	// The version number that the author of the enclave assigned to the enclave.
	ImageVersion uint32 `json:"image_version"`
	// The security version number that the author of the enclave assigned to the enclave.
	SecurityVersion uint32 `json:"security_version"`
	// The expected virtual size of the private address range for the enclave,in bytes.
	EnclaveSize uint64 `json:"enclave_size"`
	// The maximum number of threads that can be created within the enclave.
	NumberOfThreads uint32 `json:"number_of_threads"`
	// A flag that indicates whether the image is suitable for use as the primary
	// image in the enclave.
	EnclaveFlags uint32 `json:"enclave_flags"`
}

// ImageEnclaveImport defines an entry in the array of images that an enclave can import.
type ImageEnclaveImport struct {
	// The type of identifier of the image that must match the value in the import record.
	MatchType uint32 `json:"match_type"`
	// The minimum enclave security version that each image must have for the
	// image to be imported successfully. The image is rejected unless its
	// enclave security version is equal to or greater than the minimum value in
	// the import record. Set the value in the import record to zero to turn off
	// the security version check.
	MinimumSecurityVersion uint32 `json:"minimum_security_version"`
	// The unique identifier of the primary module for the enclave, if the
	// MatchType member is IMAGE_ENCLAVE_IMPORT_MATCH_UNIQUE_ID. Otherwise,
	// the author identifier of the primary module for the enclave..
	UniqueOrAuthorID [ImageEnclaveLongIDLength]uint8 `json:"unique_or_author_id"`
	// The family identifier of the primary module for the enclave.
	FamilyID [ImageEnclaveShortIDLength]uint8 `json:"family_id"`
	// The image identifier of the primary module for the enclave.
	ImageID [ImageEnclaveShortIDLength]uint8 `json:"image_id"`
	// The relative virtual address of a NULL-terminated string that contains
	// the same value found in the import directory for the image.
	ImportName uint32 `json:"import_name"`
	// Reserved.
	Reserved uint32 `json:"reserved"`
}

// ImageVolatileMetadata is the on-disk volatile metadata directory header.
type ImageVolatileMetadata struct {
	Size                       uint32 `json:"size"`
	Version                    uint32 `json:"version"`
	VolatileAccessTable        uint32 `json:"volatile_access_table"`
	VolatileAccessTableSize    uint32 `json:"volatile_access_table_size"`
	VolatileInfoRangeTable     uint32 `json:"volatile_info_range_table"`
	VolatileInfoRangeTableSize uint32 `json:"volatile_info_range_table_size"`
}

// The load configuration structure (IMAGE_LOAD_CONFIG_DIRECTORY) was formerly
// used in very limited cases in the Windows NT operating system itself to
// describe various features too difficult or too large to describe in the file
// header or optional header of the image. Current versions of the Microsoft
// linker and Windows XP and later versions of Windows use a new version of this
// structure for 32-bit x86-based systems that include reserved SEH technology.
// The data directory entry for a pre-reserved SEH load configuration structure
// must specify a particular size of the load configuration structure because
// the operating system loader always expects it to be a certain value. In that
// regard, the size is really only a version check. For compatibility with
// Windows XP and earlier versions of Windows, the size must be 64 for x86 images.
func (pe *File) parseLoadConfigDirectory(rva, size uint32) error {

	// As the load config structure changes over time,
	// we first read its size to figure out which one we have to cast against.
	fileOffset := pe.GetOffsetFromRva(rva)
	structSize, err := pe.ReadUint32(fileOffset)
	if err != nil {
		return err
	}

	// Use this helper function to print struct size.
	// PrintLoadConfigStruct()
	var loadCfg interface{}

	// Boundary check
	totalSize := fileOffset + size

	// Integer overflow
	if (totalSize > fileOffset) != (size > 0) {
		return ErrOutsideBoundary
	}

	if fileOffset >= pe.size || totalSize > pe.size {
		return ErrOutsideBoundary
	}

	if pe.Is32 {
		// Cap the read at the largest directory layout we know; a smaller
		// structSize simply leaves the trailing fields zeroed.
		maxSize := uint32(binary.Size(ImageLoadConfigDirectory32{}))
		if structSize > maxSize {
			return ErrOutsideBoundary
		}
		loadCfg32 := ImageLoadConfigDirectory32{}
		imgLoadConfigDirectory := make([]byte, binary.Size(loadCfg32))
		copy(imgLoadConfigDirectory, pe.data[fileOffset:fileOffset+structSize])
		buf := bytes.NewReader(imgLoadConfigDirectory)
		err = binary.Read(buf, binary.LittleEndian, &loadCfg32)
		loadCfg = loadCfg32
	} else {
		maxSize := uint32(binary.Size(ImageLoadConfigDirectory64{}))
		if structSize > maxSize {
			return ErrOutsideBoundary
		}
		loadCfg64 := ImageLoadConfigDirectory64{}
		imgLoadConfigDirectory := make([]byte, binary.Size(loadCfg64))
		copy(imgLoadConfigDirectory, pe.data[fileOffset:fileOffset+structSize])
		buf := bytes.NewReader(imgLoadConfigDirectory)
		err = binary.Read(buf, binary.LittleEndian, &loadCfg64)
		loadCfg = loadCfg64
	}

	if err != nil {
		return err
	}

	// Save the load config struct.
	pe.HasLoadCFG = true
	pe.LoadConfig.Struct = loadCfg

	// Retrieve SEH handlers if there are any..
	// SEH handler tables are an x86-only concept, hence the Is32 guard.
	if pe.Is32 {
		handlers := pe.getSEHHandlers()
		pe.LoadConfig.SEH = handlers
	}

	// Retrieve Control Flow Guard Function Targets if there are any.
	pe.LoadConfig.GFIDS = pe.getControlFlowGuardFunctions()

	// Retrieve Control Flow Guard IAT entries if there are any.
	pe.LoadConfig.CFGIAT = pe.getControlFlowGuardIAT()

	// Retrieve Long jump target functions if there are any.
	pe.LoadConfig.CFGLongJump = pe.getLongJumpTargetTable()

	// Retrieve compiled hybrid PE metadata if there are any.
	pe.LoadConfig.CHPE = pe.getHybridPE()

	// Retrieve dynamic value relocation table if there are any.
	pe.LoadConfig.DVRT = pe.getDynamicValueRelocTable()

	// Retrieve enclave configuration if there are any.
	pe.LoadConfig.Enclave = pe.getEnclaveConfiguration()

	// Retrieve volatile metadata table if there are any.
	pe.LoadConfig.VolatileMetadata = pe.getVolatileMetadata()

	return nil
}

// StringifyGuardFlags returns list of strings which describes the GuardFlags.
func StringifyGuardFlags(flags uint32) []string {
	// NOTE(review): map iteration order is random, so the returned slice
	// order varies between calls — confirm no caller depends on ordering.
	var values []string
	guardFlagMap := map[uint32]string{
		ImageGuardCfInstrumented:                 "Instrumented",
		ImageGuardCfWInstrumented:                "WriteInstrumented",
		ImageGuardCfFunctionTablePresent:         "TargetMetadata",
		ImageGuardSecurityCookieUnused:           "SecurityCookieUnused",
		ImageGuardProtectDelayLoadIAT:            "DelayLoadIAT",
		ImageGuardDelayLoadIATInItsOwnSection:    "DelayLoadIATInItsOwnSection",
		ImageGuardCfExportSuppressionInfoPresent: "ExportSuppressionInfoPresent",
		ImageGuardCfEnableExportSuppression:      "EnableExportSuppression",
		ImageGuardCfLongJumpTablePresent:         "LongJumpTablePresent",
	}

	for k, s := range guardFlagMap {
		if k&flags != 0 {
			values = append(values, s)
		}
	}
	return values
}

// getSEHHandlers reads the x86 SafeSEH handler table and returns the list of
// handler RVAs. Read errors truncate the result rather than failing.
func (pe *File) getSEHHandlers() []uint32 {

	var handlers []uint32
	v := reflect.ValueOf(pe.LoadConfig.Struct)

	// SEHandlerCount is found in index 19 of the struct.
	SEHandlerCount := uint32(v.Field(19).Uint())
	if SEHandlerCount > 0 {
		SEHandlerTable := uint32(v.Field(18).Uint())
		// The table field holds a VA; subtract the image base to get an RVA.
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
		rva := SEHandlerTable - imageBase
		for i := uint32(0); i < SEHandlerCount; i++ {
			offset := pe.GetOffsetFromRva(rva + i*4)
			handler, err := pe.ReadUint32(offset)
			if err != nil {
				return handlers
			}

			handlers = append(handlers, handler)
		}
	}

	return handlers
}

// getControlFlowGuardFunctions decodes the GFIDS (Guard CF function) table.
func (pe *File) getControlFlowGuardFunctions() []CFGFunction {

	v := reflect.ValueOf(pe.LoadConfig.Struct)
	var GFIDS []CFGFunction
	var err error

	// The GFIDS table is an array of 4 + n bytes, where n is given by :
	// ((GuardFlags & IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_MASK) >>
	// IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_SHIFT).
	// This allows for extra metadata to be attached to CFG call targets in
	// the future. The only currently defined metadata is an optional 1-byte
	// extra flags field (“GFIDS flags”) that is attached to each GFIDS
	// entry if any call targets have metadata.
	GuardFlags := v.Field(24).Uint()
	n := (GuardFlags & ImageGuardCfFunctionTableSizeMask) >>
		ImageGuardCfFunctionTableSizeShift
	GuardCFFunctionCount := v.Field(23).Uint()
	if GuardCFFunctionCount > 0 {
		if pe.Is32 {
			GuardCFFunctionTable := uint32(v.Field(22).Uint())
			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
			rva := GuardCFFunctionTable - imageBase
			offset := pe.GetOffsetFromRva(rva)
			for i := uint32(1); i <= uint32(GuardCFFunctionCount); i++ {
				cfgFunction := CFGFunction{}
				var cfgFlags uint8
				cfgFunction.RVA, err = pe.ReadUint32(offset)
				if err != nil {
					return GFIDS
				}
				if n > 0 {
					err = pe.structUnpack(&cfgFlags, offset+4, uint32(n))
					if err != nil {
						return GFIDS
					}
					cfgFunction.Flags = ImageGuardFlagType(cfgFlags)
					if cfgFlags == ImageGuardFlagFIDSuppressed ||
						cfgFlags == ImageGuardFlagExportSuppressed {
						exportName := pe.GetExportFunctionByRVA(cfgFunction.RVA)
						cfgFunction.Description = exportName.Name
					}
				}

				GFIDS = append(GFIDS, cfgFunction)
				offset += 4 + uint32(n)
			}
		} else {
			GuardCFFunctionTable := v.Field(22).Uint()
			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
			rva := uint32(GuardCFFunctionTable - imageBase)
			offset := pe.GetOffsetFromRva(rva)
			for i := uint64(1); i <= GuardCFFunctionCount; i++ {
				var cfgFlags uint8
				cfgFunction := CFGFunction{}
				cfgFunction.RVA, err = pe.ReadUint32(offset)
				if err != nil {
					return GFIDS
				}
				if n > 0 {
					// NOTE(review): structUnpack's error is ignored here,
					// unlike the 32-bit branch above — probably should be
					// checked the same way.
					pe.structUnpack(&cfgFlags, offset+4, uint32(n))
					cfgFunction.Flags = ImageGuardFlagType(cfgFlags)
					if cfgFlags == ImageGuardFlagFIDSuppressed ||
						cfgFlags == ImageGuardFlagExportSuppressed {
						exportName := pe.GetExportFunctionByRVA(cfgFunction.RVA)
						cfgFunction.Description = exportName.Name
					}
				}

				GFIDS = append(GFIDS, cfgFunction)
				offset += 4 + uint32(n)
			}
		}
	}
	return GFIDS
}

// getControlFlowGuardIAT decodes the Guard address-taken IAT entry table.
func (pe *File) getControlFlowGuardIAT() []CFGIATEntry {

	v := reflect.ValueOf(pe.LoadConfig.Struct)
	var GFGIAT []CFGIATEntry
	var err error

	// GuardAddressTakenIatEntryCount is found in index 27 of the struct.
	// An image that supports CFG ES includes a GuardAddressTakenIatEntryTable
	// whose count is provided by the GuardAddressTakenIatEntryCount as part
	// of its load configuration directory. This table is structurally
	// formatted the same as the GFIDS table. It uses the same GuardFlags
	// IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_MASK mechanism to encode extra
	// optional metadata bytes in the address taken IAT table, though all
	// metadata bytes must be zero for the address taken IAT table and are
	// reserved.
	GuardFlags := v.Field(24).Uint()
	n := (GuardFlags & ImageGuardCfFunctionTableSizeMask) >>
		ImageGuardCfFunctionTableSizeShift
	GuardAddressTakenIatEntryCount := v.Field(27).Uint()
	if GuardAddressTakenIatEntryCount > 0 {
		if pe.Is32 {
			GuardAddressTakenIatEntryTable := uint32(v.Field(26).Uint())
			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
			rva := GuardAddressTakenIatEntryTable - imageBase
			offset := pe.GetOffsetFromRva(rva)
			for i := uint32(1); i <= uint32(GuardAddressTakenIatEntryCount); i++ {
				cfgIATEntry := CFGIATEntry{}
				cfgIATEntry.RVA, err = pe.ReadUint32(offset)
				if err != nil {
					return GFGIAT
				}
				imp, index := pe.GetImportEntryInfoByRVA(cfgIATEntry.RVA)
				if len(imp.Functions) != 0 {
					cfgIATEntry.INTValue = uint32(imp.Functions[index].OriginalThunkValue)
					cfgIATEntry.IATValue = uint32(imp.Functions[index].ThunkValue)
					cfgIATEntry.Description = imp.Name + "!" + imp.Functions[index].Name
				}

				GFGIAT = append(GFGIAT, cfgIATEntry)
				offset += 4 + uint32(n)
			}
		} else {
			GuardAddressTakenIatEntryTable := v.Field(26).Uint()
			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
			rva := uint32(GuardAddressTakenIatEntryTable - imageBase)
			offset := pe.GetOffsetFromRva(rva)
			for i := uint64(1); i <= GuardAddressTakenIatEntryCount; i++ {
				cfgIATEntry := CFGIATEntry{}
				cfgIATEntry.RVA, err = pe.ReadUint32(offset)
				if err != nil {
					return GFGIAT
				}
				imp, index := pe.GetImportEntryInfoByRVA(cfgIATEntry.RVA)
				if len(imp.Functions) != 0 {
					cfgIATEntry.INTValue = uint32(imp.Functions[index].OriginalThunkValue)
					cfgIATEntry.IATValue = uint32(imp.Functions[index].ThunkValue)
					cfgIATEntry.Description = imp.Name + "!" + imp.Functions[index].Name
				}

				GFGIAT = append(GFGIAT, cfgIATEntry)
				offset += 4 + uint32(n)
			}
		}
	}
	return GFGIAT
}

// getLongJumpTargetTable decodes the Guard long-jump target table into a
// list of target RVAs.
func (pe *File) getLongJumpTargetTable() []uint32 {

	v := reflect.ValueOf(pe.LoadConfig.Struct)
	var longJumpTargets []uint32

	// The long jump table represents a sorted array of RVAs that are valid
	// long jump targets. If a long jump target module sets
	// IMAGE_GUARD_CF_LONGJUMP_TABLE_PRESENT in its GuardFlags field, then
	// all long jump targets must be enumerated in the LongJumpTargetTable.
	GuardFlags := v.Field(24).Uint()
	n := (GuardFlags & ImageGuardCfFunctionTableSizeMask) >>
		ImageGuardCfFunctionTableSizeShift

	// GuardLongJumpTargetCount is found in index 29 of the struct.
	GuardLongJumpTargetCount := v.Field(29).Uint()
	if GuardLongJumpTargetCount > 0 {
		if pe.Is32 {
			GuardLongJumpTargetTable := uint32(v.Field(28).Uint())
			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
			rva := GuardLongJumpTargetTable - imageBase
			offset := pe.GetOffsetFromRva(rva)
			for i := uint32(1); i <= uint32(GuardLongJumpTargetCount); i++ {
				target, err := pe.ReadUint32(offset)
				if err != nil {
					return longJumpTargets
				}

				longJumpTargets = append(longJumpTargets, target)
				offset += 4 + uint32(n)
			}
		} else {
			GuardLongJumpTargetTable := v.Field(28).Uint()
			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
			rva := uint32(GuardLongJumpTargetTable - imageBase)
			offset := pe.GetOffsetFromRva(rva)
			for i := uint64(1); i <= GuardLongJumpTargetCount; i++ {
				target, err := pe.ReadUint32(offset)
				if err != nil {
					return longJumpTargets
				}

				longJumpTargets = append(longJumpTargets, target)
				offset += 4 + uint32(n)
			}
		}
	}
	return longJumpTargets
}

// getHybridPE parses the CHPE (Compiled Hybrid PE) metadata pointed to by
// CHPEMetadataPointer, including code ranges and the compiler IAT.
// Returns nil when the pointer is absent or the data is malformed.
func (pe *File) getHybridPE() *HybridPE {
	v := reflect.ValueOf(pe.LoadConfig.Struct)

	// CHPEMetadataPointer is found in index 31 of the struct.
	CHPEMetadataPointer := v.Field(31).Uint()
	if CHPEMetadataPointer == 0 {
		return nil
	}
	var rva uint32
	if pe.Is32 {
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
		rva = uint32(CHPEMetadataPointer) - imageBase
	} else {
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
		rva = uint32(CHPEMetadataPointer - imageBase)
	}

	// As the image CHPE metadata structure changes over time,
	// we first read its version to figure out which one we have to
	// cast against.
	fileOffset := pe.GetOffsetFromRva(rva)
	version, err := pe.ReadUint32(fileOffset)
	if err != nil {
		return nil
	}

	// Older versions lack the trailing pointer fields; shrink the read
	// size accordingly (v1: -8 bytes, v2: -4 bytes, v3+: full struct).
	structSize := uint32(0)
	imgCHPEMetaX86 := ImageCHPEMetadataX86{}
	switch version {
	case 0x1:
		structSize = uint32(binary.Size(imgCHPEMetaX86) - 8)
	case 0x2:
		structSize = uint32(binary.Size(imgCHPEMetaX86) - 4)
	case 0x3:
		structSize = uint32(binary.Size(imgCHPEMetaX86))
	default:
		// This should be a newer version, default to the latest CHPE version.
		structSize = uint32(binary.Size(imgCHPEMetaX86))
	}

	// Boundary check
	totalSize := fileOffset + structSize

	// Integer overflow
	if (totalSize > fileOffset) != (structSize > 0) {
		pe.logger.Debug("encountered an outside read boundary when reading CHPE structure")
		return nil
	}

	if fileOffset >= pe.size || totalSize > pe.size {
		pe.logger.Debug("encountered an outside read boundary when reading CHPE structure")
		return nil
	}

	imgCHPEMeta := make([]byte, binary.Size(imgCHPEMetaX86))
	copy(imgCHPEMeta, pe.data[fileOffset:fileOffset+structSize])
	buf := bytes.NewReader(imgCHPEMeta)
	err = binary.Read(buf, binary.LittleEndian, &imgCHPEMetaX86)
	if err != nil {
		pe.logger.Debug("encountered an error while unpacking image CHPE Meta")
		return nil
	}

	hybridPE := HybridPE{}
	hybridPE.CHPEMetadata = imgCHPEMetaX86

	// Code Ranges

	/*
		typedef struct _IMAGE_CHPE_RANGE_ENTRY {
			union {
				ULONG StartOffset;
				struct {
					ULONG NativeCode : 1;
					ULONG AddressBits : 31;
				} DUMMYSTRUCTNAME;
			} DUMMYUNIONNAME;

			ULONG Length;
		} IMAGE_CHPE_RANGE_ENTRY, *PIMAGE_CHPE_RANGE_ENTRY;
	*/

	rva = imgCHPEMetaX86.CHPECodeAddressRangeOffset
	for i := 0; i < int(imgCHPEMetaX86.CHPECodeAddressRangeCount); i++ {

		codeRange := CodeRange{}
		fileOffset := pe.GetOffsetFromRva(rva)
		begin, err := pe.ReadUint32(fileOffset)
		if err != nil {
			break
		}

		// Low bit of the start offset is the machine selector; mask it out
		// of the address.
		if begin&1 == 1 {
			codeRange.Machine = 1
			begin = uint32(int(begin) & ^1)
		}
		codeRange.Begin = begin

		fileOffset += 4
		size, err := pe.ReadUint32(fileOffset)
		if err != nil {
			break
		}
		codeRange.Length = size

		hybridPE.CodeRanges = append(hybridPE.CodeRanges, codeRange)
		rva += 8
	}

	// Compiler IAT
	if imgCHPEMetaX86.CompilerIATPointer != 0 {
		rva := imgCHPEMetaX86.CompilerIATPointer
		// NOTE(review): 1024 is a hard-coded safety cap on the number of
		// entries walked; the real table length is not read from the image.
		for i := 0; i < 1024; i++ {
			compilerIAT := CompilerIAT{}
			compilerIAT.RVA = rva
			fileOffset = pe.GetOffsetFromRva(rva)
			compilerIAT.Value, err = pe.ReadUint32(fileOffset)
			if err != nil {
				break
			}
			impFunc, _ := pe.GetImportEntryInfoByRVA(compilerIAT.RVA)
			compilerIAT.Description = impFunc.Name

			hybridPE.CompilerIAT = append(
				hybridPE.CompilerIAT, compilerIAT)
			rva += 4
		}
	}
	return &hybridPE
}

// getDynamicValueRelocTable parses the DVRT (retpoline relocation data)
// located in the .reloc section. Returns nil when absent or malformed.
func (pe *File) getDynamicValueRelocTable() *DVRT {

	var structSize uint32
	var imgDynRelocSize uint32
	var retpolineType uint8
	dvrt := DVRT{}
	imgDynRelocTable := ImageDynamicRelocationTable{}

	v := reflect.ValueOf(pe.LoadConfig.Struct)
	DynamicValueRelocTableOffset := v.Field(34).Uint()
	DynamicValueRelocTableSection := v.Field(35).Uint()
	if DynamicValueRelocTableOffset == 0 || DynamicValueRelocTableSection == 0 {
		return nil
	}

	section := pe.getSectionByName(".reloc")
	if section == nil {
		return nil
	}

	// Get the dynamic value relocation table header.
	rva := section.VirtualAddress + uint32(DynamicValueRelocTableOffset)
	offset := pe.GetOffsetFromRva(rva)
	structSize = uint32(binary.Size(imgDynRelocTable))
	err := pe.structUnpack(&imgDynRelocTable, offset, structSize)
	if err != nil {
		return nil
	}

	dvrt.ImageDynamicRelocationTable = imgDynRelocTable
	offset += structSize

	// Get dynamic relocation entries according to version.
	switch imgDynRelocTable.Version {
	case 1:
		relocTableIt := uint32(0)
		baseBlockSize := uint32(0)

		// Iterate over our dynamic reloc table entries.
		// NOTE(review): loop progress relies on sizes read from the file;
		// a zero BaseRelocSize/SizeOfBlock in a crafted image could stall
		// or loop these iterations — worth a guard.
		for relocTableIt < imgDynRelocTable.Size {

			relocEntry := RelocEntry{}
			// Each block starts with the header.
			if pe.Is32 {
				imgDynReloc := ImageDynamicRelocation32{}
				imgDynRelocSize = uint32(binary.Size(imgDynReloc))
				err = pe.structUnpack(&imgDynReloc, offset, imgDynRelocSize)
				if err != nil {
					return nil
				}
				relocEntry.ImageDynamicRelocation = imgDynReloc
				baseBlockSize = imgDynReloc.BaseRelocSize
				retpolineType = uint8(imgDynReloc.Symbol)
			} else {
				imgDynReloc := ImageDynamicRelocation64{}
				imgDynRelocSize = uint32(binary.Size(imgDynReloc))
				err = pe.structUnpack(&imgDynReloc, offset, imgDynRelocSize)
				if err != nil {
					return nil
				}
				relocEntry.ImageDynamicRelocation = imgDynReloc
				baseBlockSize = imgDynReloc.BaseRelocSize
				retpolineType = uint8(imgDynReloc.Symbol)
			}
			offset += imgDynRelocSize
			relocTableIt += imgDynRelocSize

			// Then, for each page, there is a block that starts with a relocation entry:
			blockIt := uint32(0)
			for blockIt <= baseBlockSize-imgDynRelocSize {

				relocBlock := RelocBlock{}

				baseReloc := ImageBaseRelocation{}
				structSize = uint32(binary.Size(baseReloc))
				err = pe.structUnpack(&baseReloc, offset, structSize)
				if err != nil {
					return nil
				}

				relocBlock.ImgBaseReloc = baseReloc
				offset += structSize

				// After that there are entries for all of the places which need
				// to be overwritten by the retpoline jump. The structure used
				// for those entries depends on the type (symbol) that was used
				// above. There are three types of retpoline so far. Entry for
				// each of them will contain pageRelativeOffset. The kernel uses
				// that entry to apply the proper replacement under
				// virtualAddress + pageRelativeOffset address.
				branchIt := uint32(0)
				switch retpolineType {
				case 3:
					for branchIt < (baseReloc.SizeOfBlock-structSize)/4 {
						imgImpCtrlTransDynReloc := ImageImportControlTransferDynamicRelocation{}
						dword, err := pe.ReadUint32(offset)
						if err != nil {
							return nil
						}
						// Unpack the 12/1/19-bit fields of the dword.
						imgImpCtrlTransDynReloc.PageRelativeOffset = uint16(dword) & 0xfff
						imgImpCtrlTransDynReloc.IndirectCall = uint16(dword) & 0x1000 >> 12
						imgImpCtrlTransDynReloc.IATIndex = dword & 0xFFFFE000 >> 13
						offset += 4
						branchIt += 1
						relocBlock.TypeOffsets = append(relocBlock.TypeOffsets,
							imgImpCtrlTransDynReloc)
					}
				case 4:
					for branchIt < (baseReloc.SizeOfBlock-structSize)/2 {
						imgIndirCtrlTransDynReloc := ImageIndirectControlTransferDynamicRelocation{}
						word, err := pe.ReadUint16(offset)
						if err != nil {
							return nil
						}
						// Unpack the bit fields of the word.
						imgIndirCtrlTransDynReloc.PageRelativeOffset = word & 0xfff
						imgIndirCtrlTransDynReloc.IndirectCall = uint8(word & 0x1000 >> 12)
						imgIndirCtrlTransDynReloc.RexWPrefix = uint8(word & 0x2000 >> 13)
						imgIndirCtrlTransDynReloc.CfgCheck = uint8(word & 0x4000 >> 14)
						imgIndirCtrlTransDynReloc.Reserved = uint8(word & 0x8000 >> 15)
						branchIt += 1
						offset += 2

						// Padding might be added at the end of the block.
						if (ImageIndirectControlTransferDynamicRelocation{}) ==
							imgIndirCtrlTransDynReloc {
							continue
						}
						relocBlock.TypeOffsets = append(relocBlock.TypeOffsets,
							imgIndirCtrlTransDynReloc)
					}
				case 5:
					for branchIt < (baseReloc.SizeOfBlock-structSize)/2 {
						imgSwitchBranchDynReloc := ImageSwitchableBranchDynamicRelocation{}
						word, err := pe.ReadUint16(offset)
						if err != nil {
							return nil
						}
						imgSwitchBranchDynReloc.PageRelativeOffset = word & 0xfff
						imgSwitchBranchDynReloc.RegisterNumber = word & 0xf000 >> 12
						offset += 2
						branchIt += 1

						// Padding might be added at the end of the block.
						if (ImageSwitchableBranchDynamicRelocation{}) ==
							imgSwitchBranchDynReloc {
							continue
						}
						relocBlock.TypeOffsets = append(relocBlock.TypeOffsets,
							imgSwitchBranchDynReloc)
					}
				}
				blockIt += baseReloc.SizeOfBlock
				relocEntry.RelocBlocks = append(relocEntry.RelocBlocks, relocBlock)
			}
			dvrt.Entries = append(dvrt.Entries, relocEntry)
			relocTableIt += baseBlockSize
		}
	case 2:
		// NOTE(review): writes directly to stdout from library code and does
		// not parse v2 entries; should use pe.logger instead of fmt.Print.
		fmt.Print("Got version 2 !")
	}

	return &dvrt
}

// getEnclaveConfiguration parses IMAGE_ENCLAVE_CONFIG{32,64} plus the array
// of enclave imports it references. Returns nil when absent or malformed.
func (pe *File) getEnclaveConfiguration() *Enclave {

	enclave := Enclave{}

	v := reflect.ValueOf(pe.LoadConfig.Struct)
	EnclaveConfigurationPointer := v.Field(40).Uint()
	if EnclaveConfigurationPointer == 0 {
		return nil
	}

	if pe.Is32 {
		imgEnclaveCfg := ImageEnclaveConfig32{}
		imgEnclaveCfgSize := uint32(binary.Size(imgEnclaveCfg))
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
		rva := uint32(EnclaveConfigurationPointer) - imageBase
		offset := pe.GetOffsetFromRva(rva)
		err := pe.structUnpack(&imgEnclaveCfg, offset, imgEnclaveCfgSize)
		if err != nil {
			return nil
		}
		enclave.Config = imgEnclaveCfg
	} else {
		imgEnclaveCfg := ImageEnclaveConfig64{}
		imgEnclaveCfgSize := uint32(binary.Size(imgEnclaveCfg))
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
		rva := uint32(EnclaveConfigurationPointer - imageBase)
		offset := pe.GetOffsetFromRva(rva)
		err := pe.structUnpack(&imgEnclaveCfg, offset, imgEnclaveCfgSize)
		if err != nil {
			return nil
		}
		enclave.Config = imgEnclaveCfg
	}

	// Get the array of images that an enclave can import.
	// FieldByName works for both config variants since the relevant fields
	// share names and (uint32) types.
	val := reflect.ValueOf(enclave.Config)
	ImportListRVA := val.FieldByName("ImportList").Interface().(uint32)
	NumberOfImports := val.FieldByName("NumberOfImports").Interface().(uint32)
	ImportEntrySize := val.FieldByName("ImportEntrySize").Interface().(uint32)

	offset := pe.GetOffsetFromRva(ImportListRVA)
	for i := uint32(0); i < NumberOfImports; i++ {
		imgEncImp := ImageEnclaveImport{}
		imgEncImpSize := uint32(binary.Size(imgEncImp))
		err := pe.structUnpack(&imgEncImp, offset, imgEncImpSize)
		if err != nil {
			return nil
		}

		offset += ImportEntrySize
		enclave.Imports = append(enclave.Imports, imgEncImp)
	}

	return &enclave
}

// getVolatileMetadata parses the volatile metadata directory (access RVA
// table and info range table). Returns nil when absent or malformed.
func (pe *File) getVolatileMetadata() *VolatileMetadata {

	volatileMeta := VolatileMetadata{}
	imgVolatileMeta := ImageVolatileMetadata{}
	rva := uint32(0)

	v := reflect.ValueOf(pe.LoadConfig.Struct)
	// Older/smaller load-config layouts do not carry this field at all.
	if v.NumField() <= 41 {
		return nil
	}

	VolatileMetadataPointer := v.Field(41).Uint()
	if VolatileMetadataPointer == 0 {
		return nil
	}

	if pe.Is32 {
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
		rva = uint32(VolatileMetadataPointer) - imageBase
	} else {
		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
		rva = uint32(VolatileMetadataPointer - imageBase)
	}

	offset := pe.GetOffsetFromRva(rva)
	imgVolatileMetaSize := uint32(binary.Size(imgVolatileMeta))
	err := pe.structUnpack(&imgVolatileMeta, offset, imgVolatileMetaSize)
	if err != nil {
		return nil
	}
	volatileMeta.Struct = imgVolatileMeta

	if imgVolatileMeta.VolatileAccessTable != 0 &&
		imgVolatileMeta.VolatileAccessTableSize != 0 {
		offset := pe.GetOffsetFromRva(imgVolatileMeta.VolatileAccessTable)
		for i := uint32(0); i < imgVolatileMeta.VolatileAccessTableSize/4; i++ {
			accessRVA, err := pe.ReadUint32(offset)
			if err != nil {
				break
			}

			volatileMeta.AccessRVATable = append(volatileMeta.AccessRVATable, accessRVA)
			offset += 4
		}
	}

	if imgVolatileMeta.VolatileInfoRangeTable != 0 && imgVolatileMeta.VolatileInfoRangeTableSize != 0 {
		offset :=
pe.GetOffsetFromRva(imgVolatileMeta.VolatileInfoRangeTable) rangeEntrySize := uint32(binary.Size(RangeTableEntry{})) for i := uint32(0); i < imgVolatileMeta.VolatileInfoRangeTableSize/rangeEntrySize; i++ { entry := RangeTableEntry{} err := pe.structUnpack(&entry, offset, rangeEntrySize) if err != nil { break } volatileMeta.InfoRangeTable = append(volatileMeta.InfoRangeTable, entry) offset += rangeEntrySize } } return &volatileMeta } // String returns a string interpretation of the load config directory image // guard flag. func (flag ImageGuardFlagType) String() string { imageGuardFlagTypeMap := map[ImageGuardFlagType]string{ ImageGuardFlagFIDSuppressed: "FID Suppressed", ImageGuardFlagExportSuppressed: "Export Suppressed", } v, ok := imageGuardFlagTypeMap[flag] if ok { return v } return "?" } ================================================ FILE: loadconfig_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe import ( "reflect" "testing" ) func TestLoadConfigDirectory(t *testing.T) { tests := []struct { in string out interface{} }{ { in: getAbsoluteFilePath("test/pspluginwkr.dll"), out: ImageLoadConfigDirectory32{ Size: 0x48, SecurityCookie: 0x45e44220, SEHandlerTable: 0x45e382e0, SEHandlerCount: 0x1, }, }, { in: getAbsoluteFilePath("test/00da1a2a9d9ebf447508bf6550f05f466f8eabb4ed6c4f2a524c0769b2d75bc1"), out: ImageLoadConfigDirectory32{ Size: 0x5c, SecurityCookie: 0x43D668, SEHandlerTable: 0x439C70, SEHandlerCount: 0x25, GuardCFCheckFunctionPointer: 0x432260, GuardCFFunctionTable: 0x4322D4, GuardCFFunctionCount: 0x90, GuardFlags: 0x10013500, }, }, { in: getAbsoluteFilePath("test/3a081c7fe475ec68ed155c76d30cfddc4d41f7a09169810682d1c75421e98eaa"), out: ImageLoadConfigDirectory32{ Size: 0xa0, SecurityCookie: 0x417008, SEHandlerTable: 0x415410, SEHandlerCount: 0x2, GuardCFCheckFunctionPointer: 0x40e384, GuardFlags: 0x100, }, }, { in: getAbsoluteFilePath("test/IEAdvpack.dll"), out: ImageLoadConfigDirectory32{ Size: 0xa4, SecurityCookie: 0x6501b074, SEHandlerTable: 0x650046d0, SEHandlerCount: 0x1, GuardCFCheckFunctionPointer: 0x6502937c, GuardCFFunctionTable: 0x650010f0, GuardCFFunctionCount: 0x55, GuardFlags: 0x10017500, GuardAddressTakenIATEntryTable: 0x6500129c, GuardAddressTakenIATEntryCount: 0x1, GuardLongJumpTargetTable: 0x650012a4, GuardLongJumpTargetCount: 0x2, }, }, { in: getAbsoluteFilePath("test/KernelBase.dll"), out: ImageLoadConfigDirectory32{ Size: 0xb8, DependentLoadFlags: 0x800, SecurityCookie: 0x101f3b50, SEHandlerTable: 0x10090c40, SEHandlerCount: 0x3, GuardCFCheckFunctionPointer: 0x101f7b08, GuardCFFunctionTable: 0x1005ab70, GuardCFFunctionCount: 0xc4a, GuardFlags: 0x10017500, GuardAddressTakenIATEntryTable: 0x1005e8e4, GuardAddressTakenIATEntryCount: 0xa, VolatileMetadataPointer: 0x10090c4c, }, }, { in: getAbsoluteFilePath("test/WdfCoInstaller01011.dll"), out: ImageLoadConfigDirectory64{ Size: 0x70, SecurityCookie: 0x18000f108, }, }, { in: 
getAbsoluteFilePath("test/D2D1Debug2.dll"), out: ImageLoadConfigDirectory64{ Size: 0x94, SecurityCookie: 0x180061008, GuardCFCheckFunctionPointer: 0x180001000, }, }, { in: getAbsoluteFilePath("test/amdxata.sys"), out: ImageLoadConfigDirectory64{ Size: 0xa0, SecurityCookie: 0x1c00030b0, GuardCFCheckFunctionPointer: 0x1c0005160, GuardCFDispatchFunctionPointer: 0x1c0005168, GuardCFFunctionTable: 0x1c0009000, GuardCFFunctionCount: 0x17, GuardFlags: 0x500, }, }, { in: getAbsoluteFilePath("test/amdi2c.sys"), out: ImageLoadConfigDirectory64{ Size: 0xd0, SecurityCookie: 0x140009090, GuardCFCheckFunctionPointer: 0x140008100, GuardCFDispatchFunctionPointer: 0x140008108, GuardFlags: 0x100, }, }, { in: getAbsoluteFilePath("test/brave.exe"), out: ImageLoadConfigDirectory64{ Size: 0x100, SecurityCookie: 0x14017b648, GuardCFCheckFunctionPointer: 0x140191000, GuardCFDispatchFunctionPointer: 0x140191008, GuardCFFunctionTable: 0x14016b627, GuardCFFunctionCount: 0x561, GuardFlags: 0x500, }, }, { in: getAbsoluteFilePath("test/shimeng.dll"), out: ImageLoadConfigDirectory64{ Size: 0x108, SecurityCookie: 0x180003000, GuardCFCheckFunctionPointer: 0x180002188, GuardCFDispatchFunctionPointer: 0x180002190, GuardCFFunctionTable: 0x180002198, GuardCFFunctionCount: 0x3, GuardFlags: 0x17500, }, }, { in: getAbsoluteFilePath("test/kernel32.dll"), out: ImageLoadConfigDirectory64{ Size: 0x118, SecurityCookie: 0x1800b3220, GuardCFCheckFunctionPointer: 0x180084218, GuardCFDispatchFunctionPointer: 0x180084220, GuardCFFunctionTable: 0x180084388, GuardCFFunctionCount: 0x5e6, GuardFlags: 0x10417500, GuardAddressTakenIATEntryTable: 0x180086108, GuardAddressTakenIATEntryCount: 0x3, GuardEHContinuationTable: 0x180084228, GuardEHContinuationCount: 0x46, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, 
reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } imgLoadCfgDirectory := file.LoadConfig.Struct if imgLoadCfgDirectory != tt.out { t.Fatalf("load config directory structure assertion failed, got %v, want %v", imgLoadCfgDirectory, tt.out) } }) } } func TestLoadConfigDirectorySEHHandlers(t *testing.T) { tests := []struct { in string out []uint32 }{ { in: getAbsoluteFilePath("test/KernelBase.dll"), out: []uint32{0x14ad30, 0x14af40, 0x14b0d0}, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } sehHandlers := file.LoadConfig.SEH if !reflect.DeepEqual(sehHandlers, tt.out) { t.Fatalf("load config SEH handlers assertion failed, got %v, want %v", sehHandlers, tt.out) } }) } } func TestLoadConfigDirectoryControlFlowGuardFunctions(t 
*testing.T) { type TestGFIDSEntry struct { entriesCount int entryIndex int CFGFunction CFGFunction } tests := []struct { in string out TestGFIDSEntry }{ { in: getAbsoluteFilePath("test/KernelBase.dll"), out: TestGFIDSEntry{ entriesCount: 0xc4a, entryIndex: 0x1, CFGFunction: CFGFunction{ RVA: 0xfe2a0, Flags: ImageGuardFlagExportSuppressed, Description: "GetCalendarInfoEx", }, }, }, { in: getAbsoluteFilePath("test/kernel32.dll"), out: TestGFIDSEntry{ entriesCount: 0x5e6, entryIndex: 0x5d3, CFGFunction: CFGFunction{ RVA: 0x71390, Flags: ImageGuardFlagExportSuppressed, Description: "QuirkIsEnabledForPackage2Worker", }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: false} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } gfids := file.LoadConfig.GFIDS if len(gfids) != tt.out.entriesCount { t.Fatalf("load config GFIDS entries count assert failed, got %v, want %v", len(gfids), tt.out.entriesCount) } guardedFunction := gfids[tt.out.entryIndex] if !reflect.DeepEqual(guardedFunction, tt.out.CFGFunction) { t.Fatalf("load config GFIDS entry assertion failed, got %v, want %v", guardedFunction, tt.out.CFGFunction) } }) } } func TestLoadConfigDirectoryControlFlowGuardIAT(t *testing.T) { type TestGFIDSEntry struct { entriesCount int entryIndex int CFGFunction CFGIATEntry } tests := 
[]struct { in string out TestGFIDSEntry }{ { in: getAbsoluteFilePath("test/KernelBase.dll"), out: TestGFIDSEntry{ entriesCount: 0xa, entryIndex: 0x9, CFGFunction: CFGIATEntry{ RVA: 0x1f7924, IATValue: 0x80000008, INTValue: 0x80000008, Description: "ntdll.dll!#8", }, }, }, { in: getAbsoluteFilePath("test/kernel32.dll"), out: TestGFIDSEntry{ entriesCount: 0x3, entryIndex: 0x2, CFGFunction: CFGIATEntry{ RVA: 0x83838, IATValue: 0xac0e0, INTValue: 0xac0e0, Description: "ntdll.dll!RtlGetLengthWithoutLastFullDosOrNtPathElement", }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: false} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } cfgIAT := file.LoadConfig.CFGIAT if len(cfgIAT) != tt.out.entriesCount { t.Fatalf("load config CFG IAT entries count assert failed, got %v, want %v", len(cfgIAT), tt.out.entriesCount) } cfgIATEntry := cfgIAT[tt.out.entryIndex] if !reflect.DeepEqual(cfgIATEntry, tt.out.CFGFunction) { t.Fatalf("load config CFG IAT entry assertion failed, got %v, want %v", cfgIATEntry, tt.out.CFGFunction) } }) } } func TestLoadConfigDirectoryControlFlowGuardLongJump(t *testing.T) { tests := []struct { in string out []uint32 }{ { in: getAbsoluteFilePath("test/IEAdvpack.dll"), out: []uint32{0x13EDD, 0x1434F}, }, { in: getAbsoluteFilePath("test/PSCRIPT5.DLL"), out: 
[]uint32{0x3FE11, 0x401F8, 0x4077D, 0x40B53, 0x40DFD, 0x40FB3}, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } cfgLongJumpTargetTable := file.LoadConfig.CFGLongJump if !reflect.DeepEqual(cfgLongJumpTargetTable, tt.out) { t.Fatalf("load config CFG long jump target table assertion failed, got %v, want %v", cfgLongJumpTargetTable, tt.out) } }) } } func TestLoadConfigDirectoryHybridPE(t *testing.T) { type TestCHPE struct { imgCHPEMetadata ImageCHPEMetadataX86 codeRanges []CodeRange compilerIAT CompilerIAT } tests := []struct { in string out TestCHPE }{ { in: getAbsoluteFilePath("test/msyuv.dll"), out: TestCHPE{ imgCHPEMetadata: ImageCHPEMetadataX86{ Version: 0x4, CHPECodeAddressRangeOffset: 0x26f8, CHPECodeAddressRangeCount: 0x4, WoWA64ExceptionHandlerFunctionPtr: 0x1000c, WoWA64DispatchCallFunctionPtr: 0x10000, WoWA64DispatchIndirectCallFunctionPtr: 0x10004, WoWA64DispatchIndirectCallCfgFunctionPtr: 0x10008, WoWA64DispatchRetFunctionPtr: 0x10010, WoWA64DispatchRetLeafFunctionPtr: 0x10014, WoWA64DispatchJumpFunctionPtr: 0x10018, CompilerIATPointer: 0x11000, WoWA64RDTSCFunctionPtr: 0x1001c, }, codeRanges: []CodeRange{ { Begin: 0x1000, Length: 0x10, Machine: 0x0, }, { Begin: 0x2a00, Length: 0x4e28, Machine: 0x1, }, { 
Begin: 0x8000, Length: 0x4b1, Machine: 0x0, }, { Begin: 0x9000, Length: 0x2090, Machine: 0x1, }, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: false} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } chpe := file.LoadConfig.CHPE if chpe.CHPEMetadata != tt.out.imgCHPEMetadata { t.Fatalf("load config CHPE metadata assertion failed, got %v, want %v", chpe.CHPEMetadata, tt.out.imgCHPEMetadata) } if !reflect.DeepEqual(chpe.CodeRanges, tt.out.codeRanges) { t.Fatalf("load config CHPE code ranges assertion failed, got %v, want %v", chpe.CodeRanges, tt.out.codeRanges) } // TODO: test compiler IAT. 
}) } } func TestLoadConfigDirectoryDVRT(t *testing.T) { type TestDVRT struct { imgDynRelocTable ImageDynamicRelocationTable relocEntriesCount int } tests := []struct { in string out TestDVRT }{ { in: getAbsoluteFilePath("test/WdBoot.sys"), out: TestDVRT{ imgDynRelocTable: ImageDynamicRelocationTable{ Version: 0x1, Size: 0x2dc, }, relocEntriesCount: 0x2, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } DVRT := file.LoadConfig.DVRT if DVRT.ImageDynamicRelocationTable != tt.out.imgDynRelocTable { t.Fatalf("load config DVRT header assertion failed, got %v, want %v", DVRT.ImageDynamicRelocationTable, tt.out.imgDynRelocTable) } if len(DVRT.Entries) != tt.out.relocEntriesCount { t.Fatalf("load config DVRT entries count assertion failed, got %v, want %v", len(DVRT.Entries), tt.out.relocEntriesCount) } }) } } func TestLoadConfigDirectoryDVRTRetpolineType(t *testing.T) { type DVRTRetpolineType struct { relocEntryIdx int imgDynReloc interface{} RelocBlockCount int relocBlockIdx int relocBlock RelocBlock } tests := []struct { in string out DVRTRetpolineType }{ { in: getAbsoluteFilePath("test/WdBoot.sys"), out: DVRTRetpolineType{ relocEntryIdx: 0x0, imgDynReloc: ImageDynamicRelocation64{ Symbol: 0x3, BaseRelocSize: 0x278, }, 
RelocBlockCount: 0x7, relocBlockIdx: 0x0, relocBlock: RelocBlock{ ImgBaseReloc: ImageBaseRelocation{ VirtualAddress: 0x2000, SizeOfBlock: 0xc, }, TypeOffsets: []interface{}{ ImageImportControlTransferDynamicRelocation{ PageRelativeOffset: 0x611, IndirectCall: 0x0, IATIndex: 0x28, }, }, }, }, }, { in: getAbsoluteFilePath("test/WdBoot.sys"), out: DVRTRetpolineType{ relocEntryIdx: 0x1, imgDynReloc: ImageDynamicRelocation64{ Symbol: 0x4, BaseRelocSize: 0x4c, }, RelocBlockCount: 0x5, relocBlockIdx: 0x4, relocBlock: RelocBlock{ ImgBaseReloc: ImageBaseRelocation{ VirtualAddress: 0xb000, SizeOfBlock: 0xc, }, TypeOffsets: []interface{}{ ImageIndirectControlTransferDynamicRelocation{ PageRelativeOffset: 0x58e, IndirectCall: 0x1, CfgCheck: 0x1, }, }, }, }, }, { in: getAbsoluteFilePath("test/acpi.sys"), out: DVRTRetpolineType{ relocEntryIdx: 0x2, imgDynReloc: ImageDynamicRelocation64{ Symbol: 0x5, BaseRelocSize: 0x4c, }, RelocBlockCount: 0x6, relocBlockIdx: 0x5, relocBlock: RelocBlock{ ImgBaseReloc: ImageBaseRelocation{ VirtualAddress: 0x43000, SizeOfBlock: 0xc, }, TypeOffsets: []interface{}{ ImageSwitchableBranchDynamicRelocation{ PageRelativeOffset: 0xd1, RegisterNumber: 0x1, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, 
err) } DVRT := file.LoadConfig.DVRT relocEntry := DVRT.Entries[tt.out.relocEntryIdx] if relocEntry.ImageDynamicRelocation != tt.out.imgDynReloc { t.Fatalf("load config DVRT reloc entry imaged dynamic relocation assertion failed, got %#v, want %#v", relocEntry.ImageDynamicRelocation, tt.out.imgDynReloc) } if len(relocEntry.RelocBlocks) != tt.out.RelocBlockCount { t.Fatalf("load config DVRT reloc block count dynamic relocation assertion failed, got %v, want %v", len(relocEntry.RelocBlocks), tt.out.RelocBlockCount) } relocBlock := relocEntry.RelocBlocks[tt.out.relocBlockIdx] if !reflect.DeepEqual(relocBlock, tt.out.relocBlock) { t.Fatalf("load config DVRT reloc block assertion failed, got %#v, want %#v", relocBlock, tt.out.relocBlock) } }) } } func TestLoadConfigDirectoryEnclave(t *testing.T) { tests := []struct { in string out Enclave }{ { in: getAbsoluteFilePath("test/SgrmEnclave_secure.dll"), out: Enclave{ Config: ImageEnclaveConfig64{ Size: 0x50, MinimumRequiredConfigSize: 0x4c, NumberOfImports: 0x4, ImportList: 0x55224, ImportEntrySize: 0x50, FamilyID: [ImageEnclaveShortIDLength]uint8{0xb1, 0x35, 0x7c, 0x2b, 0x69, 0x9f, 0x47, 0xf9, 0xbb, 0xc9, 0x4f, 0x44, 0xf2, 0x54, 0xdb, 0x9d}, ImageID: [ImageEnclaveShortIDLength]uint8{0x24, 0x56, 0x46, 0x36, 0xcd, 0x4a, 0x4a, 0xd8, 0x86, 0xa2, 0xf4, 0xec, 0x25, 0xa9, 0x72, 0x2}, ImageVersion: 0x1, SecurityVersion: 0x1, EnclaveSize: 0x10000000, NumberOfThreads: 0x8, EnclaveFlags: 0x1, }, Imports: []ImageEnclaveImport{ { MatchType: 0x0, ImportName: 0xffff, }, { MatchType: 0x4, ImageID: [ImageEnclaveShortIDLength]uint8{ 0xf0, 0x3c, 0xcd, 0xa7, 0xe8, 0x7b, 0x46, 0xeb, 0xaa, 0xe7, 0x1f, 0x13, 0xd5, 0xcd, 0xde, 0x5d}, ImportName: 0x5b268, }, { MatchType: 0x4, ImageID: [ImageEnclaveShortIDLength]uint8{ 0x20, 0x27, 0xbd, 0x68, 0x75, 0x59, 0x49, 0xb7, 0xbe, 0x6, 0x34, 0x50, 0xe2, 0x16, 0xd7, 0xed}, ImportName: 0x5b428, }, { MatchType: 0x4, ImageID: [ImageEnclaveShortIDLength]uint8{ 0x72, 0x84, 0x41, 0x72, 0x67, 0xa8, 0x4e, 0x8d, 0xbf, 
0x1, 0x28, 0x4b, 0x7, 0x43, 0x2b, 0x1e}, ImportName: 0x5b63c, }, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseLoadConfigDirectory(va, size) if err != nil { t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err) } enclave := file.LoadConfig.Enclave if !reflect.DeepEqual(*enclave, tt.out) { t.Fatalf("load config enclave assertion failed, got %#v, want %#v", enclave, tt.out) } }) } } func TestLoadConfigDirectoryVolatileMetadata(t *testing.T) { type TestVolatileMetadata struct { imgVolatileMetadata ImageVolatileMetadata accessRVATableCount int accessRVATableIndex int accessRVAEntry uint32 infoRangeTableCount int infoRangeTableIndex int infoRangeEntry RangeTableEntry } tests := []struct { in string out TestVolatileMetadata }{ { in: getAbsoluteFilePath("test/KernelBase.dll"), out: TestVolatileMetadata{ imgVolatileMetadata: ImageVolatileMetadata{ Size: 0x18, Version: 0x1, VolatileAccessTable: 0x00090C64, VolatileAccessTableSize: 0x00002E48, VolatileInfoRangeTable: 0x00093AAC, VolatileInfoRangeTableSize: 0x000001D0, }, accessRVATableCount: 0xB92, accessRVATableIndex: 0xB91, accessRVAEntry: 0x1DF998, infoRangeTableCount: 0x3A, infoRangeTableIndex: 0x39, infoRangeEntry: RangeTableEntry{ RVA: 0x16BB10, Size: 0x75550, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, 
			err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}
			// Locate the load config data directory entry for either
			// optional header flavor.
			var va, size uint32
			if file.Is64 {
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryLoadConfig]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			} else {
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}
			err = file.parseLoadConfigDirectory(va, size)
			if err != nil {
				t.Fatalf("parseLoadConfigDirectory(%s) failed, reason: %v", tt.in, err)
			}
			volatileMetadata := file.LoadConfig.VolatileMetadata
			if volatileMetadata.Struct != tt.out.imgVolatileMetadata {
				t.Fatalf("load config image volatile metadata assertion failed, got %v, want %v",
					volatileMetadata, tt.out.imgVolatileMetadata)
			}
			if len(volatileMetadata.AccessRVATable) != tt.out.accessRVATableCount {
				t.Fatalf("load config access RVA table entries count assert failed, got %v, want %v",
					len(volatileMetadata.AccessRVATable), tt.out.accessRVATableCount)
			}
			accessRVAEntry := volatileMetadata.AccessRVATable[tt.out.accessRVATableIndex]
			if accessRVAEntry != tt.out.accessRVAEntry {
				t.Fatalf("load config access RVA table entry assertion failed, got %v, want %v",
					accessRVAEntry, tt.out.accessRVAEntry)
			}
			if len(volatileMetadata.InfoRangeTable) != tt.out.infoRangeTableCount {
				t.Fatalf("load config info range table entries count assert failed, got %v, want %v",
					len(volatileMetadata.InfoRangeTable), tt.out.infoRangeTableCount)
			}
			infoRangeEntry := volatileMetadata.InfoRangeTable[tt.out.infoRangeTableIndex]
			if infoRangeEntry != tt.out.infoRangeEntry {
				t.Fatalf("load config info range table entry assertion failed, got %v, want %v",
					infoRangeEntry, tt.out.infoRangeEntry)
			}
		})
	}
}

// TestLoadConfigDirectoryCorruptSize verifies that a corrupt (oversized)
// LoadConfig Size field is rejected with an error rather than a panic.
func TestLoadConfigDirectoryCorruptSize(t *testing.T) {
	// This PE has a LoadConfig Size field (0xb50087) that far exceeds the
	// maximum valid struct size. parseLoadConfigDirectory should return an
	// error instead of panicking.
	path := getAbsoluteFilePath("test/03f18017c215ad67f4d043cd733d6f762edbf99f9f7e0ed89166536f80544d96")
	ops := Options{Fast: true}
	file, err := New(path, &ops)
	if err != nil {
		t.Fatalf("New(%s) failed, reason: %v", path, err)
	}
	err = file.Parse()
	if err != nil {
		t.Fatalf("Parse(%s) failed, reason: %v", path, err)
	}
	oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
	dirEntry := oh32.DataDirectory[ImageDirectoryEntryLoadConfig]
	err = file.parseLoadConfigDirectory(dirEntry.VirtualAddress, dirEntry.Size)
	if err == nil {
		t.Fatalf("parseLoadConfigDirectory should have failed for corrupt LoadConfig size")
	}
}

================================================
FILE: log/README.md
================================================
# Logger

This code was taken from the go microservice framework [kratos](https://github.com/go-kratos/kratos).

## Usage

### Structured logging

```go
logger := log.NewStdLogger(os.Stdout)
// fields & valuer
logger = log.With(logger,
	"service.name", "hellworld",
	"service.version", "v1.0.0",
	"ts", log.DefaultTimestamp,
	"caller", log.DefaultCaller,
)
logger.Log(log.LevelInfo, "key", "value")

// helper
helper := log.NewHelper(logger)
helper.Log(log.LevelInfo, "key", "value")
helper.Info("info message")
helper.Infof("info %s", "message")
helper.Infow("key", "value")

// filter
log := log.NewHelper(log.NewFilter(logger,
	log.FilterLevel(log.LevelInfo),
	log.FilterKey("foo"),
	log.FilterValue("bar"),
	log.FilterFunc(customFilter),
))
log.Debug("debug log")
log.Info("info log")
log.Warn("warn log")
log.Error("warn log")
```

## Third party log library

If you need to implement a third party logging library like `zap`, have a look at this [url](https://github.com/go-kratos/kratos/tree/main/contrib/log).
================================================
FILE: log/filter.go
================================================
package log

// FilterOption is a filter option applied by NewFilter.
type FilterOption func(*Filter)

// fuzzyStr is the mask written over values matched by key/value rules.
const fuzzyStr = "***"

// FilterLevel with filter level: records below this level are dropped.
func FilterLevel(level Level) FilterOption {
	return func(opts *Filter) {
		opts.level = level
	}
}

// FilterKey with filter key: the value of each listed key is masked.
func FilterKey(key ...string) FilterOption {
	return func(o *Filter) {
		for _, v := range key {
			o.key[v] = struct{}{}
		}
	}
}

// FilterValue with filter value: each listed value is masked.
func FilterValue(value ...string) FilterOption {
	return func(o *Filter) {
		for _, v := range value {
			o.value[v] = struct{}{}
		}
	}
}

// FilterFunc with filter func: a predicate returning true drops the record.
func FilterFunc(f func(level Level, keyvals ...interface{}) bool) FilterOption {
	return func(o *Filter) {
		o.filter = f
	}
}

// Filter is a logger filter.
type Filter struct {
	logger Logger // wrapped logger that receives surviving records
	level  Level  // minimum level; lower records are dropped
	key    map[interface{}]struct{} // keys whose values get masked
	value  map[interface{}]struct{} // values that get masked
	filter func(level Level, keyvals ...interface{}) bool // optional drop predicate
}

// NewFilter new a logger filter wrapping logger with the given options.
func NewFilter(logger Logger, opts ...FilterOption) *Filter {
	options := Filter{
		logger: logger,
		key:    make(map[interface{}]struct{}),
		value:  make(map[interface{}]struct{}),
	}
	for _, o := range opts {
		o(&options)
	}
	return &options
}

// Log Print log by level and keyvals, applying the level threshold, the
// optional predicate, and key/value masking in that order.
// NOTE(review): masking rewrites the caller's keyvals slice in place.
func (f *Filter) Log(level Level, keyvals ...interface{}) error {
	if level < f.level {
		return nil
	}
	// fkv is used to provide a slice to contains both logger.prefix and keyvals for filter
	// NOTE(review): if the wrapped logger is a *logger with an empty
	// prefix, fkv stays nil and the predicate is invoked with no keyvals
	// — confirm this is the intended behavior.
	var fkv []interface{}
	if l, ok := f.logger.(*logger); ok {
		if len(l.prefix) > 0 {
			fkv = make([]interface{}, 0, len(l.prefix)+len(keyvals))
			fkv = append(fkv, l.prefix...)
			fkv = append(fkv, keyvals...)
		}
	} else {
		fkv = keyvals
	}
	if f.filter != nil && f.filter(level, fkv...) {
		return nil
	}
	if len(f.key) > 0 || len(f.value) > 0 {
		// keyvals is a flat key/value list; walk it two at a time and
		// skip a trailing key with no value.
		for i := 0; i < len(keyvals); i += 2 {
			v := i + 1
			if v >= len(keyvals) {
				continue
			}
			if _, ok := f.key[keyvals[i]]; ok {
				keyvals[v] = fuzzyStr
			}
			if _, ok := f.value[keyvals[v]]; ok {
				keyvals[v] = fuzzyStr
			}
		}
	}
	return f.logger.Log(level, keyvals...)
}

================================================
FILE: log/filter_test.go
================================================
package log

import (
	"bytes"
	"io"
	"testing"
)

// TestFilterAll exercises level, key, value, and func filters together.
func TestFilterAll(t *testing.T) {
	logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller)
	log := NewHelper(NewFilter(logger,
		FilterLevel(LevelDebug),
		FilterKey("username"),
		FilterValue("hello"),
		FilterFunc(testFilterFunc),
	))
	log.Log(LevelDebug, "msg", "test debug")
	log.Info("hello")
	log.Infow("password", "123456")
	log.Infow("username", "kratos")
	log.Warn("warn log")
}

// TestFilterLevel checks that records below the threshold are dropped.
func TestFilterLevel(t *testing.T) {
	logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller)
	log := NewHelper(NewFilter(NewFilter(logger, FilterLevel(LevelWarn))))
	log.Log(LevelDebug, "msg1", "te1st debug")
	log.Debug("test debug")
	log.Debugf("test %s", "debug")
	log.Debugw("log", "test debug")
	log.Warn("warn log")
}

// TestFilterCaller logs through a bare Filter and through a Helper.
func TestFilterCaller(t *testing.T) {
	logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller)
	log := NewFilter(logger)
	_ = log.Log(LevelDebug, "msg1", "te1st debug")
	logHelper := NewHelper(NewFilter(logger))
	logHelper.Log(LevelDebug, "msg1", "te1st debug")
}

// TestFilterKey checks key-based masking.
func TestFilterKey(t *testing.T) {
	logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller)
	log := NewHelper(NewFilter(logger, FilterKey("password")))
	log.Debugw("password", "123456")
}

// TestFilterValue checks value-based masking.
func TestFilterValue(t *testing.T) {
	logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller)
	log := NewHelper(NewFilter(logger, FilterValue("debug")))
	log.Debugf("test %s", "debug")
}

// TestFilterFunc checks predicate-based filtering.
func TestFilterFunc(t *testing.T) {
	logger := With(DefaultLogger, "ts", DefaultTimestamp,
"caller", DefaultCaller) log := NewHelper(NewFilter(logger, FilterFunc(testFilterFunc))) log.Debug("debug level") log.Infow("password", "123456") } func BenchmarkFilterKey(b *testing.B) { log := NewHelper(NewFilter(NewStdLogger(io.Discard), FilterKey("password"))) for i := 0; i < b.N; i++ { log.Infow("password", "123456") } } func BenchmarkFilterValue(b *testing.B) { log := NewHelper(NewFilter(NewStdLogger(io.Discard), FilterValue("password"))) for i := 0; i < b.N; i++ { log.Infow("password") } } func BenchmarkFilterFunc(b *testing.B) { log := NewHelper(NewFilter(NewStdLogger(io.Discard), FilterFunc(testFilterFunc))) for i := 0; i < b.N; i++ { log.Info("password", "123456") } } func testFilterFunc(level Level, keyvals ...interface{}) bool { if level == LevelWarn { return true } for i := 0; i < len(keyvals); i++ { if keyvals[i] == "password" { keyvals[i+1] = fuzzyStr } } return false } func TestFilterFuncWitchLoggerPrefix(t *testing.T) { buf := new(bytes.Buffer) tests := []struct { logger Logger want string }{ { logger: NewFilter(With(NewStdLogger(buf), "caller", "caller", "prefix", "whaterver"), FilterFunc(testFilterFuncWithLoggerPrefix)), want: "", }, { logger: NewFilter(With(NewStdLogger(buf), "caller", "caller"), FilterFunc(testFilterFuncWithLoggerPrefix)), want: "INFO caller=caller msg=msg\n", }, } for _, tt := range tests { err := tt.logger.Log(LevelInfo, "msg", "msg") if err != nil { t.Fatal("err should be nil") } got := buf.String() if got != tt.want { t.Fatalf("filter should catch prefix, want %s, got %s.", tt.want, got) } buf.Reset() } } func testFilterFuncWithLoggerPrefix(level Level, keyvals ...interface{}) bool { if level == LevelWarn { return true } for i := 0; i < len(keyvals); i += 2 { if keyvals[i] == "prefix" { return true } } return false } ================================================ FILE: log/global.go ================================================ package log import ( "sync" ) // globalLogger is designed as a global logger in current 
process. var global = &loggerAppliance{} // loggerAppliance is the proxy of `Logger` to // make logger change will affect all sub-logger. type loggerAppliance struct { lock sync.Mutex Logger helper *Helper } func init() { global.SetLogger(DefaultLogger) } func (a *loggerAppliance) SetLogger(in Logger) { a.lock.Lock() defer a.lock.Unlock() a.Logger = in a.helper = NewHelper(a.Logger) } func (a *loggerAppliance) GetLogger() Logger { return a.Logger } // SetLogger should be called before any other log call. // And it is NOT THREAD SAFE. func SetLogger(logger Logger) { global.SetLogger(logger) } // GetLogger returns global logger appliance as logger in current process. func GetLogger() Logger { return global } // Log Print log by level and keyvals. func Log(level Level, keyvals ...interface{}) { global.helper.Log(level, keyvals...) } // Debug logs a message at debug level. func Debug(a ...interface{}) { global.helper.Debug(a...) } // Debugf logs a message at debug level. func Debugf(format string, a ...interface{}) { global.helper.Debugf(format, a...) } // Debugw logs a message at debug level. func Debugw(keyvals ...interface{}) { global.helper.Debugw(keyvals...) } // Info logs a message at info level. func Info(a ...interface{}) { global.helper.Info(a...) } // Infof logs a message at info level. func Infof(format string, a ...interface{}) { global.helper.Infof(format, a...) } // Infow logs a message at info level. func Infow(keyvals ...interface{}) { global.helper.Infow(keyvals...) } // Warn logs a message at warn level. func Warn(a ...interface{}) { global.helper.Warn(a...) } // Warnf logs a message at warnf level. func Warnf(format string, a ...interface{}) { global.helper.Warnf(format, a...) } // Warnw logs a message at warnf level. func Warnw(keyvals ...interface{}) { global.helper.Warnw(keyvals...) } // Error logs a message at error level. func Error(a ...interface{}) { global.helper.Error(a...) } // Errorf logs a message at error level. 
func Errorf(format string, a ...interface{}) { global.helper.Errorf(format, a...) } // Errorw logs a message at error level. func Errorw(keyvals ...interface{}) { global.helper.Errorw(keyvals...) } // Fatal logs a message at fatal level. func Fatal(a ...interface{}) { global.helper.Fatal(a...) } // Fatalf logs a message at fatal level. func Fatalf(format string, a ...interface{}) { global.helper.Fatalf(format, a...) } // Fatalw logs a message at fatal level. func Fatalw(keyvals ...interface{}) { global.helper.Fatalw(keyvals...) } ================================================ FILE: log/global_test.go ================================================ package log import ( "bytes" "fmt" "os" "strings" "testing" ) func TestGlobalLog(t *testing.T) { buffer := &bytes.Buffer{} SetLogger(NewStdLogger(buffer)) testCases := []struct { level Level content []interface{} }{ { LevelDebug, []interface{}{"test debug"}, }, { LevelInfo, []interface{}{"test info"}, }, { LevelInfo, []interface{}{"test %s", "info"}, }, { LevelWarn, []interface{}{"test warn"}, }, { LevelError, []interface{}{"test error"}, }, { LevelError, []interface{}{"test %s", "error"}, }, } expected := []string{} for _, tc := range testCases { msg := fmt.Sprintf(tc.content[0].(string), tc.content[1:]...) switch tc.level { case LevelDebug: Debugf(tc.content[0].(string), tc.content[1:]...) expected = append(expected, fmt.Sprintf("%s msg=%s", "DEBUG", msg)) case LevelInfo: Infof(tc.content[0].(string), tc.content[1:]...) expected = append(expected, fmt.Sprintf("%s msg=%s", "INFO", msg)) case LevelWarn: Warnf(tc.content[0].(string), tc.content[1:]...) expected = append(expected, fmt.Sprintf("%s msg=%s", "WARN", msg)) case LevelError: Errorf(tc.content[0].(string), tc.content[1:]...) 
expected = append(expected, fmt.Sprintf("%s msg=%s", "ERROR", msg)) } } expected = append(expected, "") t.Logf("Content: %s", buffer.String()) if buffer.String() != strings.Join(expected, "\n") { t.Errorf("Expected: %s, got: %s", strings.Join(expected, "\n"), buffer.String()) } } func TestGlobalLogUpdate(t *testing.T) { l := &loggerAppliance{} l.SetLogger(NewStdLogger(os.Stdout)) LOG := NewHelper(l) LOG.Info("Log to stdout") buffer := &bytes.Buffer{} l.SetLogger(NewStdLogger(buffer)) LOG.Info("Log to buffer") expected := "INFO msg=Log to buffer\n" if buffer.String() != expected { t.Errorf("Expected: %s, got: %s", expected, buffer.String()) } } ================================================ FILE: log/helper.go ================================================ package log import ( "context" "fmt" "os" ) // DefaultMessageKey default message key. var DefaultMessageKey = "msg" // Option is Helper option. type Option func(*Helper) // Helper is a logger helper. type Helper struct { logger Logger msgKey string } // WithMessageKey with message key. func WithMessageKey(k string) Option { return func(opts *Helper) { opts.msgKey = k } } // NewHelper new a logger helper. func NewHelper(logger Logger, opts ...Option) *Helper { options := &Helper{ msgKey: DefaultMessageKey, // default message key logger: logger, } for _, o := range opts { o(options) } return options } // WithContext returns a shallow copy of h with its context changed // to ctx. The provided ctx must be non-nil. func (h *Helper) WithContext(ctx context.Context) *Helper { return &Helper{ msgKey: h.msgKey, logger: WithContext(ctx, h.logger), } } // Log Print log by level and keyvals. func (h *Helper) Log(level Level, keyvals ...interface{}) { _ = h.logger.Log(level, keyvals...) } // Debug logs a message at debug level. func (h *Helper) Debug(a ...interface{}) { _ = h.logger.Log(LevelDebug, h.msgKey, fmt.Sprint(a...)) } // Debugf logs a message at debug level. 
func (h *Helper) Debugf(format string, a ...interface{}) { _ = h.logger.Log(LevelDebug, h.msgKey, fmt.Sprintf(format, a...)) } // Debugw logs a message at debug level. func (h *Helper) Debugw(keyvals ...interface{}) { _ = h.logger.Log(LevelDebug, keyvals...) } // Info logs a message at info level. func (h *Helper) Info(a ...interface{}) { _ = h.logger.Log(LevelInfo, h.msgKey, fmt.Sprint(a...)) } // Infof logs a message at info level. func (h *Helper) Infof(format string, a ...interface{}) { _ = h.logger.Log(LevelInfo, h.msgKey, fmt.Sprintf(format, a...)) } // Infow logs a message at info level. func (h *Helper) Infow(keyvals ...interface{}) { _ = h.logger.Log(LevelInfo, keyvals...) } // Warn logs a message at warn level. func (h *Helper) Warn(a ...interface{}) { _ = h.logger.Log(LevelWarn, h.msgKey, fmt.Sprint(a...)) } // Warnf logs a message at warnf level. func (h *Helper) Warnf(format string, a ...interface{}) { _ = h.logger.Log(LevelWarn, h.msgKey, fmt.Sprintf(format, a...)) } // Warnw logs a message at warnf level. func (h *Helper) Warnw(keyvals ...interface{}) { _ = h.logger.Log(LevelWarn, keyvals...) } // Error logs a message at error level. func (h *Helper) Error(a ...interface{}) { _ = h.logger.Log(LevelError, h.msgKey, fmt.Sprint(a...)) } // Errorf logs a message at error level. func (h *Helper) Errorf(format string, a ...interface{}) { _ = h.logger.Log(LevelError, h.msgKey, fmt.Sprintf(format, a...)) } // Errorw logs a message at error level. func (h *Helper) Errorw(keyvals ...interface{}) { _ = h.logger.Log(LevelError, keyvals...) } // Fatal logs a message at fatal level. func (h *Helper) Fatal(a ...interface{}) { _ = h.logger.Log(LevelFatal, h.msgKey, fmt.Sprint(a...)) os.Exit(1) } // Fatalf logs a message at fatal level. func (h *Helper) Fatalf(format string, a ...interface{}) { _ = h.logger.Log(LevelFatal, h.msgKey, fmt.Sprintf(format, a...)) os.Exit(1) } // Fatalw logs a message at fatal level. 
func (h *Helper) Fatalw(keyvals ...interface{}) { _ = h.logger.Log(LevelFatal, keyvals...) os.Exit(1) } ================================================ FILE: log/helper_test.go ================================================ package log import ( "context" "io" "os" "testing" ) func TestHelper(t *testing.T) { logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller) log := NewHelper(logger) log.Log(LevelDebug, "msg", "test debug") log.Debug("test debug") log.Debugf("test %s", "debug") log.Debugw("log", "test debug") log.Warn("test warn") log.Warnf("test %s", "warn") log.Warnw("log", "test warn") } func TestHelperWithMsgKey(t *testing.T) { logger := With(DefaultLogger, "ts", DefaultTimestamp, "caller", DefaultCaller) log := NewHelper(logger, WithMessageKey("message")) log.Debugf("test %s", "debug") log.Debugw("log", "test debug") } func TestHelperLevel(t *testing.T) { log := NewHelper(DefaultLogger) log.Debug("test debug") log.Info("test info") log.Infof("test %s", "info") log.Warn("test warn") log.Error("test error") log.Errorf("test %s", "error") log.Errorw("log", "test error") } func BenchmarkHelperPrint(b *testing.B) { log := NewHelper(NewStdLogger(io.Discard)) for i := 0; i < b.N; i++ { log.Debug("test") } } func BenchmarkHelperPrintf(b *testing.B) { log := NewHelper(NewStdLogger(io.Discard)) for i := 0; i < b.N; i++ { log.Debugf("%s", "test") } } func BenchmarkHelperPrintw(b *testing.B) { log := NewHelper(NewStdLogger(io.Discard)) for i := 0; i < b.N; i++ { log.Debugw("key", "value") } } type traceKey struct{} func TestContext(t *testing.T) { logger := With(NewStdLogger(os.Stdout), "trace", Trace(), ) log := NewHelper(logger) ctx := context.WithValue(context.Background(), traceKey{}, "2233") log.WithContext(ctx).Info("got trace!") } func Trace() Valuer { return func(ctx context.Context) interface{} { s := ctx.Value(traceKey{}).(string) return s } } ================================================ FILE: log/level.go 
================================================ package log import "strings" // Level is a logger level. type Level int8 // LevelKey is logger level key. const LevelKey = "level" const ( // LevelDebug is logger debug level. LevelDebug Level = iota - 1 // LevelInfo is logger info level. LevelInfo // LevelWarn is logger warn level. LevelWarn // LevelError is logger error level. LevelError // LevelFatal is logger fatal level LevelFatal ) func (l Level) String() string { switch l { case LevelDebug: return "DEBUG" case LevelInfo: return "INFO" case LevelWarn: return "WARN" case LevelError: return "ERROR" case LevelFatal: return "FATAL" default: return "" } } // ParseLevel parses a level string into a logger Level value. func ParseLevel(s string) Level { switch strings.ToUpper(s) { case "DEBUG": return LevelDebug case "INFO": return LevelInfo case "WARN": return LevelWarn case "ERROR": return LevelError case "FATAL": return LevelFatal } return LevelInfo } ================================================ FILE: log/level_test.go ================================================ package log import "testing" func TestLevel_String(t *testing.T) { tests := []struct { name string l Level want string }{ { name: "DEBUG", l: LevelDebug, want: "DEBUG", }, { name: "INFO", l: LevelInfo, want: "INFO", }, { name: "WARN", l: LevelWarn, want: "WARN", }, { name: "ERROR", l: LevelError, want: "ERROR", }, { name: "FATAL", l: LevelFatal, want: "FATAL", }, { name: "other", l: 10, want: "", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := tt.l.String(); got != tt.want { t.Errorf("String() = %v, want %v", got, tt.want) } }) } } func TestParseLevel(t *testing.T) { tests := []struct { name string s string want Level }{ { name: "DEBUG", want: LevelDebug, s: "DEBUG", }, { name: "INFO", want: LevelInfo, s: "INFO", }, { name: "WARN", want: LevelWarn, s: "WARN", }, { name: "ERROR", want: LevelError, s: "ERROR", }, { name: "FATAL", want: LevelFatal, s: "FATAL", }, { name: 
"other", want: LevelInfo, s: "other", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := ParseLevel(tt.s); got != tt.want { t.Errorf("ParseLevel() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: log/log.go ================================================ package log import ( "context" "log" ) // DefaultLogger is default logger. var DefaultLogger = NewStdLogger(log.Writer()) // Logger is a logger interface. type Logger interface { Log(level Level, keyvals ...interface{}) error } type logger struct { logs []Logger prefix []interface{} hasValuer bool ctx context.Context } func (c *logger) Log(level Level, keyvals ...interface{}) error { kvs := make([]interface{}, 0, len(c.prefix)+len(keyvals)) kvs = append(kvs, c.prefix...) if c.hasValuer { bindValues(c.ctx, kvs) } kvs = append(kvs, keyvals...) for _, l := range c.logs { if err := l.Log(level, kvs...); err != nil { return err } } return nil } // With with logger fields. func With(l Logger, kv ...interface{}) Logger { if c, ok := l.(*logger); ok { kvs := make([]interface{}, 0, len(c.prefix)+len(kv)) kvs = append(kvs, kv...) kvs = append(kvs, c.prefix...) return &logger{ logs: c.logs, prefix: kvs, hasValuer: containsValuer(kvs), ctx: c.ctx, } } return &logger{logs: []Logger{l}, prefix: kv, hasValuer: containsValuer(kv)} } // WithContext returns a shallow copy of l with its context changed // to ctx. The provided ctx must be non-nil. func WithContext(ctx context.Context, l Logger) Logger { if c, ok := l.(*logger); ok { return &logger{ logs: c.logs, prefix: c.prefix, hasValuer: c.hasValuer, ctx: ctx, } } return &logger{logs: []Logger{l}, ctx: ctx} } // MultiLogger wraps multi logger. 
func MultiLogger(logs ...Logger) Logger { return &logger{logs: logs} } ================================================ FILE: log/log_test.go ================================================ package log import ( "context" "os" "testing" ) func TestInfo(t *testing.T) { logger := DefaultLogger logger = With(logger, "ts", DefaultTimestamp, "caller", DefaultCaller) _ = logger.Log(LevelInfo, "key1", "value1") } func TestWrapper(t *testing.T) { out := NewStdLogger(os.Stdout) err := NewStdLogger(os.Stderr) l := With(MultiLogger(out, err), "ts", DefaultTimestamp, "caller", DefaultCaller) _ = l.Log(LevelInfo, "msg", "test") } func TestWithContext(t *testing.T) { WithContext(context.Background(), nil) } ================================================ FILE: log/std.go ================================================ package log import ( "bytes" "fmt" "io" "log" "sync" ) var _ Logger = (*stdLogger)(nil) type stdLogger struct { log *log.Logger pool *sync.Pool } // NewStdLogger new a logger with writer. func NewStdLogger(w io.Writer) Logger { return &stdLogger{ log: log.New(w, "", 0), pool: &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, }, } } // Log print the kv pairs log. 
func (l *stdLogger) Log(level Level, keyvals ...interface{}) error { if len(keyvals) == 0 { return nil } if (len(keyvals) & 1) == 1 { keyvals = append(keyvals, "KEYVALS UNPAIRED") } buf := l.pool.Get().(*bytes.Buffer) buf.WriteString(level.String()) for i := 0; i < len(keyvals); i += 2 { _, _ = fmt.Fprintf(buf, " %s=%v", keyvals[i], keyvals[i+1]) } _ = l.log.Output(4, buf.String()) //nolint:gomnd buf.Reset() l.pool.Put(buf) return nil } ================================================ FILE: log/std_test.go ================================================ package log import "testing" func TestStdLogger(t *testing.T) { logger := DefaultLogger logger = With(logger, "caller", DefaultCaller, "ts", DefaultTimestamp) _ = logger.Log(LevelInfo, "msg", "test debug") _ = logger.Log(LevelInfo, "msg", "test info") _ = logger.Log(LevelInfo, "msg", "test warn") _ = logger.Log(LevelInfo, "msg", "test error") _ = logger.Log(LevelDebug, "singular") logger2 := DefaultLogger _ = logger2.Log(LevelDebug) } ================================================ FILE: log/value.go ================================================ package log import ( "context" "runtime" "strconv" "strings" "time" ) var ( defaultDepth = 3 // DefaultCaller is a Valuer that returns the file and line. DefaultCaller = Caller(defaultDepth) // DefaultTimestamp is a Valuer that returns the current wallclock time. DefaultTimestamp = Timestamp(time.RFC3339) ) // Valuer is returns a log value. type Valuer func(ctx context.Context) interface{} // Value return the function value. func Value(ctx context.Context, v interface{}) interface{} { if v, ok := v.(Valuer); ok { return v(ctx) } return v } // Caller returns a Valuer that returns a pkg/file:line description of the caller. 
func Caller(depth int) Valuer { return func(context.Context) interface{} { d := depth _, file, line, _ := runtime.Caller(d) if strings.LastIndex(file, "/log/filter.go") > 0 { d++ _, file, line, _ = runtime.Caller(d) } if strings.LastIndex(file, "/log/helper.go") > 0 { d++ _, file, line, _ = runtime.Caller(d) } idx := strings.LastIndexByte(file, '/') return file[idx+1:] + ":" + strconv.Itoa(line) } } // Timestamp returns a timestamp Valuer with a custom time format. func Timestamp(layout string) Valuer { return func(context.Context) interface{} { return time.Now().Format(layout) } } func bindValues(ctx context.Context, keyvals []interface{}) { for i := 1; i < len(keyvals); i += 2 { if v, ok := keyvals[i].(Valuer); ok { keyvals[i] = v(ctx) } } } func containsValuer(keyvals []interface{}) bool { for i := 1; i < len(keyvals); i += 2 { if _, ok := keyvals[i].(Valuer); ok { return true } } return false } ================================================ FILE: log/value_test.go ================================================ package log import ( "context" "testing" ) func TestValue(t *testing.T) { logger := DefaultLogger logger = With(logger, "ts", DefaultTimestamp, "caller", DefaultCaller) _ = logger.Log(LevelInfo, "msg", "helloworld") logger = DefaultLogger logger = With(logger) _ = logger.Log(LevelDebug, "msg", "helloworld") var v1 interface{} got := Value(context.Background(), v1) if got != v1 { t.Errorf("Value() = %v, want %v", got, v1) } var v2 Valuer = func(ctx context.Context) interface{} { return 3 } got = Value(context.Background(), v2) res := got.(int) if res != 3 { t.Errorf("Value() = %v, want %v", res, 3) } } ================================================ FILE: ntheader.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe import ( "encoding/binary" ) // ImageFileHeaderMachineType represents the type of the image file header `Machine“ field. type ImageFileHeaderMachineType uint16 // ImageFileHeaderCharacteristicsType represents the type of the image file header // `Characteristics` field. type ImageFileHeaderCharacteristicsType uint16 // ImageOptionalHeaderSubsystemType represents the type of the optional header `Subsystem field. type ImageOptionalHeaderSubsystemType uint16 // ImageOptionalHeaderDllCharacteristicsType represents the type of the optional header `DllCharacteristics field. type ImageOptionalHeaderDllCharacteristicsType uint16 // ImageNtHeader represents the PE header and is the general term for a structure // named IMAGE_NT_HEADERS. type ImageNtHeader struct { // Signature is a DWORD containing the value 50h, 45h, 00h, 00h. Signature uint32 `json:"signature"` // IMAGE_NT_HEADERS provides a standard COFF header. It is located // immediately after the PE signature. The COFF header provides the most // general characteristics of a PE/COFF file, applicable to both object and // executable files. It is represented with IMAGE_FILE_HEADER structure. FileHeader ImageFileHeader `json:"file_header"` // OptionalHeader is of type ImageOptionalHeader32 or ImageOptionalHeader64. OptionalHeader interface{} `json:"optional_header"` } // ImageFileHeader contains infos about the physical layout and properties of the // file. type ImageFileHeader struct { // The number that identifies the type of target machine. Machine ImageFileHeaderMachineType `json:"machine"` // The number of sections. This indicates the size of the section table, // which immediately follows the headers. NumberOfSections uint16 `json:"number_of_sections"` // // The low 32 bits of the number of seconds since 00:00 January 1, 1970 // (a C run-time time_t value), that indicates when the file was created. 
TimeDateStamp uint32 `json:"time_date_stamp"` // // The file offset of the COFF symbol table, or zero if no COFF symbol // table is present. This value should be zero for an image because COFF // debugging information is deprecated. PointerToSymbolTable uint32 `json:"pointer_to_symbol_table"` // The number of entries in the symbol table. This data can be used to // locate the string table, which immediately follows the symbol table. // This value should be zero for an image because COFF debugging information // is deprecated. NumberOfSymbols uint32 `json:"number_of_symbols"` // The size of the optional header, which is required for executable files // but not for object files. This value should be zero for an object file. SizeOfOptionalHeader uint16 `json:"size_of_optional_header"` // The flags that indicate the attributes of the file. Characteristics ImageFileHeaderCharacteristicsType `json:"characteristics"` } // ImageOptionalHeader32 represents the PE32 format structure of the optional header. // PE32 contains this additional field, which is absent in PE32+. type ImageOptionalHeader32 struct { // The unsigned integer that identifies the state of the image file. // The most common number is 0x10B, which identifies it as a normal // executable file. 0x107 identifies it as a ROM image, and 0x20B identifies // it as a PE32+ executable. Magic uint16 `json:"magic"` // Linker major version number. The VC++ linker sets this field to current // version of Visual Studio. MajorLinkerVersion uint8 `json:"major_linker_version"` // The linker minor version number. MinorLinkerVersion uint8 `json:"minor_linker_version"` // The size of the code (text) section, or the sum of all code sections // if there are multiple sections. SizeOfCode uint32 `json:"size_of_code"` // The size of the initialized data section (held in the field SizeOfRawData // of the respective section header), or the sum of all such sections if // there are multiple data sections. 
SizeOfInitializedData uint32 `json:"size_of_initialized_data"` // The size of the uninitialized data section (BSS), or the sum of all // such sections if there are multiple BSS sections. This data is not part // of the disk file and does not have specific values, but the OS loader // commits memory space for this data when the file is loaded. SizeOfUninitializedData uint32 `json:"size_of_uninitialized_data"` // The address of the entry point relative to the image base when the // executable file is loaded into memory. For program images, this is the // starting address. For device drivers, this is the address of the // initialization function. An entry point is optional for DLLs. When no // entry point is present, this field must be zero. For managed PE files, // this value always points to the common language runtime invocation stub. AddressOfEntryPoint uint32 `json:"address_of_entrypoint"` // The address that is relative to the image base of the beginning-of-code // section when it is loaded into memory. BaseOfCode uint32 `json:"base_of_code"` // The address that is relative to the image base of the beginning-of-data // section when it is loaded into memory. This entry doesn’t exist in the // 64-bit Optional header. BaseOfData uint32 `json:"base_of_data"` // The preferred address of the first byte of image when loaded into memory; // must be a multiple of 64 K. The default for DLLs is 0x10000000. The // default for Windows CE EXEs is 0x00010000. The default for Windows NT, // Windows 2000, Windows XP, Windows 95, Windows 98, and Windows Me is // 0x00400000. ImageBase uint32 `json:"image_base"` // The alignment (in bytes) of sections when they are loaded into memory. // It must be greater than or equal to FileAlignment. The default is the // page size for the architecture. SectionAlignment uint32 `json:"section_alignment"` // The alignment factor (in bytes) that is used to align the raw data of // sections in the image file. 
The value should be a power of 2 between 512 // and 64 K, inclusive. The default is 512. If the SectionAlignment is less // than the architecture's page size, then FileAlignment must match // SectionAlignment. FileAlignment uint32 `json:"file_alignment"` // The major version number of the required operating system. MajorOperatingSystemVersion uint16 `json:"major_os_version"` // The minor version number of the required operating system. MinorOperatingSystemVersion uint16 `json:"minor_os_version"` // The major version number of the image. MajorImageVersion uint16 `json:"major_image_version"` // The minor version number of the image. MinorImageVersion uint16 `json:"minor_image_version"` // The major version number of the subsystem. MajorSubsystemVersion uint16 `json:"major_subsystem_version"` // The minor version number of the subsystem. MinorSubsystemVersion uint16 `json:"minor_subsystem_version"` // Reserved, must be zero. Win32VersionValue uint32 `json:"win32_version_value"` // The size (in bytes) of the image, including all headers, as the image // is loaded in memory. It must be a multiple of SectionAlignment. SizeOfImage uint32 `json:"size_of_image"` // The combined size of an MS-DOS stub, PE header, and section headers // rounded up to a multiple of FileAlignment. SizeOfHeaders uint32 `json:"size_of_headers"` // The image file checksum. The algorithm for computing the checksum is // incorporated into IMAGHELP.DLL. The following are checked for validation // at load time: all drivers, any DLL loaded at boot time, and any DLL // that is loaded into a critical Windows process. CheckSum uint32 `json:"checksum"` // The subsystem that is required to run this image. Subsystem ImageOptionalHeaderSubsystemType `json:"subsystem"` // For more information, see DLL Characteristics later in this specification. DllCharacteristics ImageOptionalHeaderDllCharacteristicsType `json:"dll_characteristics"` // Size of virtual memory to reserve for the initial thread’s stack. 
Only // the SizeOfStackCommit field is committed; the rest is available in // one-page increments. The default is 1MB for 32-bit images and 4MB for // 64-bit images. SizeOfStackReserve uint32 `json:"size_of_stack_reserve"` // Size of virtual memory initially committed for the initial thread’s // stack. The default is one page (4KB) for 32-bit images and 16KB for // 64-bit images. SizeOfStackCommit uint32 `json:"size_of_stack_commit"` // size of the local heap space to reserve. Only SizeOfHeapCommit is // committed; the rest is made available one page at a time until the // reserve size is reached. The default is 1MB for both 32-bit and 64-bit // images. SizeOfHeapReserve uint32 `json:"size_of_heap_reserve"` // Size of virtual memory initially committed for the process heap. The // default is 4KB (one operating system memory page) for 32-bit images and // 16KB for 64-bit images. SizeOfHeapCommit uint32 `json:"size_of_heap_commit"` // Reserved, must be zero. LoaderFlags uint32 `json:"loader_flags"` // Number of entries in the DataDirectory array; at least 16. Although it // is theoretically possible to emit more than 16 data directories, all // existing managed compilers emit exactly 16 data directories, with the // 16th (last) data directory never used (reserved). NumberOfRvaAndSizes uint32 `json:"number_of_rva_and_sizes"` // An array of 16 IMAGE_DATA_DIRECTORY structures. DataDirectory [16]DataDirectory `json:"data_directories"` } // ImageOptionalHeader64 represents the PE32+ format structure of the optional header. type ImageOptionalHeader64 struct { // The unsigned integer that identifies the state of the image file. // The most common number is 0x10B, which identifies it as a normal // executable file. 0x107 identifies it as a ROM image, and 0x20B identifies // it as a PE32+ executable. Magic uint16 `json:"magic"` // Linker major version number. The VC++ linker sets this field to current // version of Visual Studio. 
MajorLinkerVersion uint8 `json:"major_linker_version"` // The linker minor version number. MinorLinkerVersion uint8 `json:"minor_linker_version"` // The size of the code (text) section, or the sum of all code sections // if there are multiple sections. SizeOfCode uint32 `json:"size_of_code"` // The size of the initialized data section (held in the field SizeOfRawData // of the respective section header), or the sum of all such sections if // there are multiple data sections. SizeOfInitializedData uint32 `json:"size_of_initialized_data"` // The size of the uninitialized data section (BSS), or the sum of all // such sections if there are multiple BSS sections. This data is not part // of the disk file and does not have specific values, but the OS loader // commits memory space for this data when the file is loaded. SizeOfUninitializedData uint32 `json:"size_of_uninitialized_data"` // The address of the entry point relative to the image base when the // executable file is loaded into memory. For program images, this is the // starting address. For device drivers, this is the address of the // initialization function. An entry point is optional for DLLs. When no // entry point is present, this field must be zero. For managed PE files, // this value always points to the common language runtime invocation stub. AddressOfEntryPoint uint32 `json:"address_of_entrypoint"` // The address that is relative to the image base of the beginning-of-code // section when it is loaded into memory. BaseOfCode uint32 `json:"base_of_code"` // In PE+, ImageBase is 8 bytes size. ImageBase uint64 `json:"image_base"` // The alignment (in bytes) of sections when they are loaded into memory. // It must be greater than or equal to FileAlignment. The default is the // page size for the architecture. SectionAlignment uint32 `json:"section_alignment"` // The alignment factor (in bytes) that is used to align the raw data of // sections in the image file. 
The value should be a power of 2 between 512 // and 64 K, inclusive. The default is 512. If the SectionAlignment is less // than the architecture's page size, then FileAlignment must match SectionAlignment. FileAlignment uint32 `json:"file_alignment"` // The major version number of the required operating system. MajorOperatingSystemVersion uint16 `json:"major_os_version"` // The minor version number of the required operating system. MinorOperatingSystemVersion uint16 `json:"minor_os_version"` // The major version number of the image. MajorImageVersion uint16 `json:"major_image_version"` // The minor version number of the image. MinorImageVersion uint16 `json:"minor_image_version"` // The major version number of the subsystem. MajorSubsystemVersion uint16 `json:"major_subsystem_version"` // The minor version number of the subsystem. MinorSubsystemVersion uint16 `json:"minor_subsystem_version"` // Reserved, must be zero. Win32VersionValue uint32 `json:"win32_version_value"` // The size (in bytes) of the image, including all headers, as the image // is loaded in memory. It must be a multiple of SectionAlignment. SizeOfImage uint32 `json:"size_of_image"` // The combined size of an MS-DOS stub, PE header, and section headers // rounded up to a multiple of FileAlignment. SizeOfHeaders uint32 `json:"size_of_headers"` // The image file checksum. The algorithm for computing the checksum is // incorporated into IMAGHELP.DLL. The following are checked for validation // at load time: all drivers, any DLL loaded at boot time, and any DLL // that is loaded into a critical Windows process. CheckSum uint32 `json:"checksum"` // The subsystem that is required to run this image. Subsystem ImageOptionalHeaderSubsystemType `json:"subsystem"` // For more information, see DLL Characteristics later in this specification. DllCharacteristics ImageOptionalHeaderDllCharacteristicsType `json:"dll_characteristics"` // Size of virtual memory to reserve for the initial thread’s stack. 
Only // the SizeOfStackCommit field is committed; the rest is available in // one-page increments. The default is 1MB for 32-bit images and 4MB for // 64-bit images. SizeOfStackReserve uint64 `json:"size_of_stack_reserve"` // Size of virtual memory initially committed for the initial thread’s // stack. The default is one page (4KB) for 32-bit images and 16KB for // 64-bit images. SizeOfStackCommit uint64 `json:"size_of_stack_commit"` // size of the local heap space to reserve. Only SizeOfHeapCommit is // committed; the rest is made available one page at a time until the // reserve size is reached. The default is 1MB for both 32-bit and 64-bit // images. SizeOfHeapReserve uint64 `json:"size_of_heap_reserve"` // Size of virtual memory initially committed for the process heap. The // default is 4KB (one operating system memory page) for 32-bit images and // 16KB for 64-bit images. SizeOfHeapCommit uint64 `json:"size_of_heap_commit"` // Reserved, must be zero. LoaderFlags uint32 `json:"loader_flags"` // Number of entries in the DataDirectory array; at least 16. Although it // is theoretically possible to emit more than 16 data directories, all // existing managed compilers emit exactly 16 data directories, with the // 16th (last) data directory never used (reserved). NumberOfRvaAndSizes uint32 `json:"number_of_rva_and_sizes"` // An array of 16 IMAGE_DATA_DIRECTORY structures. DataDirectory [16]DataDirectory `json:"data_directories"` } // DataDirectory represents an array of 16 IMAGE_DATA_DIRECTORY structures, // 8 bytes apiece, each relating to an important data structure in the PE file. // The data directory table starts at offset 96 in a 32-bit PE header and at // offset 112 in a 64-bit PE header. Each entry in the data directory table // contains the RVA and size of a table or a string that this particular // directory entry describes;this information is used by the operating system. type DataDirectory struct { // The RVA of the data structure. 
VirtualAddress uint32 `json:"virtual_address"` // The size in bytes of the data structure referred to. Size uint32 `json:"size"` } // ParseNTHeader parse the PE NT header structure referred as IMAGE_NT_HEADERS. // Its offset is given by the e_lfanew field in the IMAGE_DOS_HEADER at the // beginning of the file. func (pe *File) ParseNTHeader() (err error) { ntHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader signature, err := pe.ReadUint32(ntHeaderOffset) if err != nil { return ErrInvalidNtHeaderOffset } // Probe for PE signature. if signature&0xFFFF == ImageOS2Signature { return ErrImageOS2SignatureFound } if signature&0xFFFF == ImageOS2LESignature { return ErrImageOS2LESignatureFound } if signature&0xFFFF == ImageVXDSignature { return ErrImageVXDSignatureFound } if signature&0xFFFF == ImageTESignature { return ErrImageTESignatureFound } // This is the smallest requirement for a valid PE. if signature != ImageNTSignature { return ErrImageNtSignatureNotFound } pe.NtHeader.Signature = signature // The file header structure contains some basic information about the file; // most importantly, a field describing the size of the optional data that // follows it. fileHeaderSize := uint32(binary.Size(pe.NtHeader.FileHeader)) fileHeaderOffset := ntHeaderOffset + 4 err = pe.structUnpack(&pe.NtHeader.FileHeader, fileHeaderOffset, fileHeaderSize) if err != nil { return err } // The PE header which immediately follows the COFF header, provides // information for the OS loader. Although this header is referred to as // the optional header, it is optional only in the sense that object files // usually don’t contain it. For PE files, this header is mandatory. // The size of the PE header is not fixed. It depends on the number of data // directories defined in the header and is specified in the // SizeOfOptionalHeader field of the COFF header. // The optional header could be either for a PE or PE+ file. 
oh32 := ImageOptionalHeader32{} oh64 := ImageOptionalHeader64{} optHeaderOffset := ntHeaderOffset + (fileHeaderSize + 4) magic, err := pe.ReadUint16(optHeaderOffset) if err != nil { return err } // Probes for PE32/PE32+ optional header magic. if magic != ImageNtOptionalHeader32Magic && magic != ImageNtOptionalHeader64Magic { return ErrImageNtOptionalHeaderMagicNotFound } // Are we dealing with a PE64 optional header. switch magic { case ImageNtOptionalHeader64Magic: size := uint32(binary.Size(oh64)) err = pe.structUnpack(&oh64, optHeaderOffset, size) if err != nil { return err } pe.Is64 = true pe.NtHeader.OptionalHeader = oh64 case ImageNtOptionalHeader32Magic: size := uint32(binary.Size(oh32)) err = pe.structUnpack(&oh32, optHeaderOffset, size) if err != nil { return err } pe.Is32 = true pe.NtHeader.OptionalHeader = oh32 } // ImageBase should be multiple of 10000h. if (pe.Is64 && oh64.ImageBase%0x10000 != 0) || (pe.Is32 && oh32.ImageBase%0x10000 != 0) { return ErrImageBaseNotAligned } // ImageBase can be any value as long as: // ImageBase + SizeOfImage < 80000000h for PE32. // ImageBase + SizeOfImage < 0xffff080000000000 for PE32+. if (pe.Is32 && oh32.ImageBase+oh32.SizeOfImage >= 0x80000000) || (pe.Is64 && oh64.ImageBase+uint64(oh64.SizeOfImage) >= 0xffff080000000000) { pe.Anomalies = append(pe.Anomalies, AnoImageBaseOverflow) } pe.HasNTHdr = true return nil } // String returns the string representations of the `Machine` field of the IMAGE_FILE_HEADER. 
func (t ImageFileHeaderMachineType) String() string { machineType := map[ImageFileHeaderMachineType]string{ ImageFileMachineUnknown: "Unknown", ImageFileMachineAM33: "Matsushita AM33", ImageFileMachineAMD64: "x64", ImageFileMachineARM: "ARM little endian", ImageFileMachineARM64: "ARM64 little endian", ImageFileMachineARMNT: "ARM Thumb-2 little endian", ImageFileMachineEBC: "EFI byte code", ImageFileMachineI386: "Intel 386 or later / compatible processors", ImageFileMachineIA64: "Intel Itanium processor family", ImageFileMachineM32R: "Mitsubishi M32R little endian", ImageFileMachineMIPS16: "MIPS16", ImageFileMachineMIPSFPU: "MIPS with FPU", ImageFileMachineMIPSFPU16: "MIPS16 with FPU", ImageFileMachinePowerPC: "Power PC little endian", ImageFileMachinePowerPCFP: "Power PC with floating point support", ImageFileMachineR4000: "MIPS little endian", ImageFileMachineRISCV32: "RISC-V 32-bit address space", ImageFileMachineRISCV64: "RISC-V 64-bit address space", ImageFileMachineRISCV128: "RISC-V 128-bit address space", ImageFileMachineSH3: "Hitachi SH3", ImageFileMachineSH3DSP: "Hitachi SH3 DSP", ImageFileMachineSH4: "Hitachi SH4", ImageFileMachineSH5: "Hitachi SH5", ImageFileMachineTHUMB: "Thumb", ImageFileMachineWCEMIPSv2: "MIPS little-endian WCE v2", } if val, ok := machineType[t]; ok { return val } return "?" } // String returns the string representations of the `Characteristics` field of the IMAGE_FILE_HEADER. 
func (t ImageFileHeaderCharacteristicsType) String() []string { var values []string fileHeaderCharacteristics := map[ImageFileHeaderCharacteristicsType]string{ ImageFileRelocsStripped: "RelocsStripped", ImageFileExecutableImage: "ExecutableImage", ImageFileLineNumsStripped: "LineNumsStripped", ImageFileLocalSymsStripped: "LocalSymsStripped", ImageFileAggressiveWSTrim: "AgressibeWsTrim", ImageFileLargeAddressAware: "LargeAddressAware", ImageFileBytesReservedLow: "BytesReservedLow", ImageFile32BitMachine: "32BitMachine", ImageFileDebugStripped: "DebugStripped", ImageFileRemovableRunFromSwap: "RemovableRunFromSwap", ImageFileSystem: "FileSystem", ImageFileDLL: "DLL", ImageFileUpSystemOnly: "UpSystemOnly", ImageFileBytesReservedHigh: "BytesReservedHigh", } for k, s := range fileHeaderCharacteristics { if k&t != 0 { values = append(values, s) } } return values } // String returns the string representations of the `DllCharacteristics` field of ImageOptionalHeader. func (t ImageOptionalHeaderDllCharacteristicsType) String() []string { var values []string imgDllCharacteristics := map[ImageOptionalHeaderDllCharacteristicsType]string{ ImageDllCharacteristicsHighEntropyVA: "HighEntropyVA", ImageDllCharacteristicsDynamicBase: "DynamicBase", ImageDllCharacteristicsForceIntegrity: "ForceIntegrity", ImageDllCharacteristicsNXCompact: "NXCompact", ImageDllCharacteristicsNoIsolation: "NoIsolation", ImageDllCharacteristicsNoSEH: "NoSEH", ImageDllCharacteristicsNoBind: "NoBind", ImageDllCharacteristicsAppContainer: "AppContainer", ImageDllCharacteristicsWdmDriver: "WdmDriver", ImageDllCharacteristicsGuardCF: "GuardCF", ImageDllCharacteristicsTerminalServiceAware: "TerminalServiceAware", } for k, s := range imgDllCharacteristics { if k&t != 0 { values = append(values, s) } } return values } // String returns the string representations of the `Subsystem` field // of ImageOptionalHeader. 
func (subsystem ImageOptionalHeaderSubsystemType) String() string { subsystemMap := map[ImageOptionalHeaderSubsystemType]string{ ImageSubsystemUnknown: "Unknown", ImageSubsystemNative: "Native", ImageSubsystemWindowsGUI: "Windows GUI", ImageSubsystemWindowsCUI: "Windows CUI", ImageSubsystemOS2CUI: "OS/2 character", ImageSubsystemPosixCUI: "POSIX character", ImageSubsystemNativeWindows: "Native Win9x driver", ImageSubsystemWindowsCEGUI: "Windows CE GUI", ImageSubsystemEFIApplication: "EFI Application", ImageSubsystemEFIBootServiceDriver: "EFI Boot Service Driver", ImageSubsystemEFIRuntimeDriver: "EFI ROM image", ImageSubsystemEFIRom: "EFI ROM image", ImageSubsystemXBOX: "XBOX", ImageSubsystemWindowsBootApplication: "Windows boot application", } if val, ok := subsystemMap[subsystem]; ok { return val } return "?" } // PrettyOptionalHeaderMagic returns the string representations of the // `Magic` field of ImageOptionalHeader. func (pe *File) PrettyOptionalHeaderMagic() string { var magic uint16 if pe.Is64 { magic = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).Magic } else { magic = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).Magic } switch magic { case ImageNtOptionalHeader32Magic: return "PE32" case ImageNtOptionalHeader64Magic: return "PE64" case ImageROMOptionalHeaderMagic: return "ROM" default: return "?" } } ================================================ FILE: ntheader_test.go ================================================ // Copyright 2021 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "reflect" "sort" "strconv" "testing" ) func TestParseNtHeaderNE(t *testing.T) { tests := []struct { in string out error }{ { // This is an NE executable file. Extracted from Windows CE 2.0. 
			getAbsoluteFilePath("test/_setup.dll"),
			ErrImageOS2SignatureFound,
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			// Fast mode skips optional directory parsing; the NE signature
			// error must still surface from Parse().
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != tt.out {
				t.Fatalf("parsing nt header failed, got %v, want %v", err, tt.out)
			}
		})
	}
}

// Table-driven check of the Machine-field stringer, including the
// unknown-value fallback "?".
func TestNtHeaderMachineType(t *testing.T) {
	tests := []struct {
		in  ImageFileHeaderMachineType
		out string
	}{
		{
			ImageFileHeaderMachineType(0x8664),
			"x64",
		},
		{
			ImageFileHeaderMachineType(0xffff),
			"?",
		},
	}

	for _, tt := range tests {
		name := "CaseNtHeaderMachineTypeEqualTo_" + strconv.Itoa(int(tt.in))
		t.Run(name, func(t *testing.T) {
			got := tt.in.String()
			if got != tt.out {
				t.Errorf("nt header machine type assertion failed, got %v, want %v", got, tt.out)
			}
		})
	}
}

// Table-driven check of the Characteristics-flags stringer. The stringer's
// output order is map-iteration order, so both sides are sorted before
// comparison.
func TestNtHeaderCharacteristicsType(t *testing.T) {
	tests := []struct {
		in  ImageFileHeaderCharacteristicsType
		out []string
	}{
		{
			ImageFileHeaderCharacteristicsType(0x0022),
			[]string{"ExecutableImage", "LargeAddressAware"},
		},
	}

	for _, tt := range tests {
		name := "CaseNtHeaderCharacteristicsTypeEqualTo_" + strconv.Itoa(int(tt.in))
		t.Run(name, func(t *testing.T) {
			got := tt.in.String()
			sort.Strings(got)
			sort.Strings(tt.out)
			if !reflect.DeepEqual(got, tt.out) {
				t.Errorf("nt header Characteristics type assertion failed, got %v, want %v", got, tt.out)
			}
		})
	}
}

// Table-driven check of the Subsystem stringer, including the
// unknown-value fallback "?".
func TestOptionalHeaderSubsystemType(t *testing.T) {
	tests := []struct {
		in  ImageOptionalHeaderSubsystemType
		out string
	}{
		{
			ImageOptionalHeaderSubsystemType(0x2),
			"Windows GUI",
		},
		{
			ImageOptionalHeaderSubsystemType(0xff),
			"?",
		},
	}

	for _, tt := range tests {
		name := "CaseOptionalHeaderSubsystemTypeEqualTo_" + strconv.Itoa(int(tt.in))
		t.Run(name, func(t *testing.T) {
			got := tt.in.String()
			if got != tt.out {
				t.Errorf("optional header subsystem type assertion failed, got %v, want %v", got, tt.out)
			}
		})
	}
}

// Table-driven check of the DllCharacteristics-flags stringer; output is
// sorted before comparison for the same reason as above.
func TestOptionalHeaderDllCharacteristicsType(t *testing.T) {
	tests := []struct {
		in  ImageOptionalHeaderDllCharacteristicsType
		out []string
	}{
		{
			ImageOptionalHeaderDllCharacteristicsType(0x8160),
			[]string{"DynamicBase", "HighEntropyVA", "NXCompact", "TerminalServiceAware"},
		},
	}

	for _, tt := range tests {
		name := "CaseOptionalHeaderDllCharacteristicsTypeEqualTo_" + strconv.Itoa(int(tt.in))
		t.Run(name, func(t *testing.T) {
			got := tt.in.String()
			sort.Strings(got)
			sort.Strings(tt.out)
			if !reflect.DeepEqual(got, tt.out) {
				t.Errorf("optional header dll characteristics type assertion failed, got %v, want %v", got, tt.out)
			}
		})
	}
}

================================================
FILE: ordlookup.go
================================================
// Copyright 2021 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

import (
	"fmt"
	"strings"
)

// WS232OrdNames maps ws2_32.dll export ordinals to API names, used to
// resolve imports that are linked by ordinal only.
var WS232OrdNames = map[uint64]string{
	1:  "accept",
	2:  "bind",
	3:  "closesocket",
	4:  "connect",
	5:  "getpeername",
	6:  "getsockname",
	7:  "getsockopt",
	8:  "htonl",
	9:  "htons",
	10: "ioctlsocket",
	11: "inet_addr",
	12: "inet_ntoa",
	13: "listen",
	14: "ntohl",
	15: "ntohs",
	16: "recv",
	17: "recvfrom",
	18: "select",
	19: "send",
	20: "sendto",
	21: "setsockopt",
	22: "shutdown",
	23: "socket",
	24: "GetAddrInfoW",
	25: "GetNameInfoW",
	26: "WSApSetPostRoutine",
	27: "FreeAddrInfoW",
	28: "WPUCompleteOverlappedRequest",
	29: "WSAAccept",
	30: "WSAAddressToStringA",
	31: "WSAAddressToStringW",
	32: "WSACloseEvent",
	33: "WSAConnect",
	34: "WSACreateEvent",
	35: "WSADuplicateSocketA",
	36: "WSADuplicateSocketW",
	37: "WSAEnumNameSpaceProvidersA",
	38: "WSAEnumNameSpaceProvidersW",
	39: "WSAEnumNetworkEvents",
	40: "WSAEnumProtocolsA",
	41: "WSAEnumProtocolsW",
	42: "WSAEventSelect",
	43: "WSAGetOverlappedResult",
	44: "WSAGetQOSByName",
	45: "WSAGetServiceClassInfoA",
	46: "WSAGetServiceClassInfoW",
	47: "WSAGetServiceClassNameByClassIdA",
	48: "WSAGetServiceClassNameByClassIdW",
	49: "WSAHtonl",
	50: "WSAHtons",
	51:
"gethostbyaddr", 52: "gethostbyname", 53: "getprotobyname", 54: "getprotobynumber", 55: "getservbyname", 56: "getservbyport", 57: "gethostname", 58: "WSAInstallServiceClassA", 59: "WSAInstallServiceClassW", 60: "WSAIoctl", 61: "WSAJoinLeaf", 62: "WSALookupServiceBeginA", 63: "WSALookupServiceBeginW", 64: "WSALookupServiceEnd", 65: "WSALookupServiceNextA", 66: "WSALookupServiceNextW", 67: "WSANSPIoctl", 68: "WSANtohl", 69: "WSANtohs", 70: "WSAProviderConfigChange", 71: "WSARecv", 72: "WSARecvDisconnect", 73: "WSARecvFrom", 74: "WSARemoveServiceClass", 75: "WSAResetEvent", 76: "WSASend", 77: "WSASendDisconnect", 78: "WSASendTo", 79: "WSASetEvent", 80: "WSASetServiceA", 81: "WSASetServiceW", 82: "WSASocketA", 83: "WSASocketW", 84: "WSAStringToAddressA", 85: "WSAStringToAddressW", 86: "WSAWaitForMultipleEvents", 87: "WSCDeinstallProvider", 88: "WSCEnableNSProvider", 89: "WSCEnumProtocols", 90: "WSCGetProviderPath", 91: "WSCInstallNameSpace", 92: "WSCInstallProvider", 93: "WSCUnInstallNameSpace", 94: "WSCUpdateProvider", 95: "WSCWriteNameSpaceOrder", 96: "WSCWriteProviderOrder", 97: "freeaddrinfo", 98: "getaddrinfo", 99: "getnameinfo", 101: "WSAAsyncSelect", 102: "WSAAsyncGetHostByAddr", 103: "WSAAsyncGetHostByName", 104: "WSAAsyncGetProtoByNumber", 105: "WSAAsyncGetProtoByName", 106: "WSAAsyncGetServByPort", 107: "WSAAsyncGetServByName", 108: "WSACancelAsyncRequest", 109: "WSASetBlockingHook", 110: "WSAUnhookBlockingHook", 111: "WSAGetLastError", 112: "WSASetLastError", 113: "WSACancelBlockingCall", 114: "WSAIsBlocking", 115: "WSAStartup", 116: "WSACleanup", 151: "__WSAFDIsSet", 500: "WEP", } // OleAut32OrdNames maps ordinals to names. 
var OleAut32OrdNames = map[uint64]string{ 2: "SysAllocString", 3: "SysReAllocString", 4: "SysAllocStringLen", 5: "SysReAllocStringLen", 6: "SysFreeString", 7: "SysStringLen", 8: "VariantInit", 9: "VariantClear", 10: "VariantCopy", 11: "VariantCopyInd", 12: "VariantChangeType", 13: "VariantTimeToDosDateTime", 14: "DosDateTimeToVariantTime", 15: "SafeArrayCreate", 16: "SafeArrayDestroy", 17: "SafeArrayGetDim", 18: "SafeArrayGetElemsize", 19: "SafeArrayGetUBound", 20: "SafeArrayGetLBound", 21: "SafeArrayLock", 22: "SafeArrayUnlock", 23: "SafeArrayAccessData", 24: "SafeArrayUnaccessData", 25: "SafeArrayGetElement", 26: "SafeArrayPutElement", 27: "SafeArrayCopy", 28: "DispGetParam", 29: "DispGetIDsOfNames", 30: "DispInvoke", 31: "CreateDispTypeInfo", 32: "CreateStdDispatch", 33: "RegisterActiveObject", 34: "RevokeActiveObject", 35: "GetActiveObject", 36: "SafeArrayAllocDescriptor", 37: "SafeArrayAllocData", 38: "SafeArrayDestroyDescriptor", 39: "SafeArrayDestroyData", 40: "SafeArrayRedim", 41: "SafeArrayAllocDescriptorEx", 42: "SafeArrayCreateEx", 43: "SafeArrayCreateVectorEx", 44: "SafeArraySetRecordInfo", 45: "SafeArrayGetRecordInfo", 46: "VarParseNumFromStr", 47: "VarNumFromParseNum", 48: "VarI2FromUI1", 49: "VarI2FromI4", 50: "VarI2FromR4", 51: "VarI2FromR8", 52: "VarI2FromCy", 53: "VarI2FromDate", 54: "VarI2FromStr", 55: "VarI2FromDisp", 56: "VarI2FromBool", 57: "SafeArraySetIID", 58: "VarI4FromUI1", 59: "VarI4FromI2", 60: "VarI4FromR4", 61: "VarI4FromR8", 62: "VarI4FromCy", 63: "VarI4FromDate", 64: "VarI4FromStr", 65: "VarI4FromDisp", 66: "VarI4FromBool", 67: "SafeArrayGetIID", 68: "VarR4FromUI1", 69: "VarR4FromI2", 70: "VarR4FromI4", 71: "VarR4FromR8", 72: "VarR4FromCy", 73: "VarR4FromDate", 74: "VarR4FromStr", 75: "VarR4FromDisp", 76: "VarR4FromBool", 77: "SafeArrayGetVartype", 78: "VarR8FromUI1", 79: "VarR8FromI2", 80: "VarR8FromI4", 81: "VarR8FromR4", 82: "VarR8FromCy", 83: "VarR8FromDate", 84: "VarR8FromStr", 85: "VarR8FromDisp", 86: "VarR8FromBool", 87: 
"VarFormat", 88: "VarDateFromUI1", 89: "VarDateFromI2", 90: "VarDateFromI4", 91: "VarDateFromR4", 92: "VarDateFromR8", 93: "VarDateFromCy", 94: "VarDateFromStr", 95: "VarDateFromDisp", 96: "VarDateFromBool", 97: "VarFormatDateTime", 98: "VarCyFromUI1", 99: "VarCyFromI2", 100: "VarCyFromI4", 101: "VarCyFromR4", 102: "VarCyFromR8", 103: "VarCyFromDate", 104: "VarCyFromStr", 105: "VarCyFromDisp", 106: "VarCyFromBool", 107: "VarFormatNumber", 108: "VarBstrFromUI1", 109: "VarBstrFromI2", 110: "VarBstrFromI4", 111: "VarBstrFromR4", 112: "VarBstrFromR8", 113: "VarBstrFromCy", 114: "VarBstrFromDate", 115: "VarBstrFromDisp", 116: "VarBstrFromBool", 117: "VarFormatPercent", 118: "VarBoolFromUI1", 119: "VarBoolFromI2", 120: "VarBoolFromI4", 121: "VarBoolFromR4", 122: "VarBoolFromR8", 123: "VarBoolFromDate", 124: "VarBoolFromCy", 125: "VarBoolFromStr", 126: "VarBoolFromDisp", 127: "VarFormatCurrency", 128: "VarWeekdayName", 129: "VarMonthName", 130: "VarUI1FromI2", 131: "VarUI1FromI4", 132: "VarUI1FromR4", 133: "VarUI1FromR8", 134: "VarUI1FromCy", 135: "VarUI1FromDate", 136: "VarUI1FromStr", 137: "VarUI1FromDisp", 138: "VarUI1FromBool", 139: "VarFormatFromTokens", 140: "VarTokenizeFormatString", 141: "VarAdd", 142: "VarAnd", 143: "VarDiv", 144: "DllCanUnloadNow", 145: "DllGetClassObject", 146: "DispCallFunc", 147: "VariantChangeTypeEx", 148: "SafeArrayPtrOfIndex", 149: "SysStringByteLen", 150: "SysAllocStringByteLen", 151: "DllRegisterServer", 152: "VarEqv", 153: "VarIdiv", 154: "VarImp", 155: "VarMod", 156: "VarMul", 157: "VarOr", 158: "VarPow", 159: "VarSub", 160: "CreateTypeLib", 161: "LoadTypeLib", 162: "LoadRegTypeLib", 163: "RegisterTypeLib", 164: "QueryPathOfRegTypeLib", 165: "LHashValOfNameSys", 166: "LHashValOfNameSysA", 167: "VarXor", 168: "VarAbs", 169: "VarFix", 170: "OaBuildVersion", 171: "ClearCustData", 172: "VarInt", 173: "VarNeg", 174: "VarNot", 175: "VarRound", 176: "VarCmp", 177: "VarDecAdd", 178: "VarDecDiv", 179: "VarDecMul", 180: "CreateTypeLib2", 181: 
"VarDecSub", 182: "VarDecAbs", 183: "LoadTypeLibEx", 184: "SystemTimeToVariantTime", 185: "VariantTimeToSystemTime", 186: "UnRegisterTypeLib", 187: "VarDecFix", 188: "VarDecInt", 189: "VarDecNeg", 190: "VarDecFromUI1", 191: "VarDecFromI2", 192: "VarDecFromI4", 193: "VarDecFromR4", 194: "VarDecFromR8", 195: "VarDecFromDate", 196: "VarDecFromCy", 197: "VarDecFromStr", 198: "VarDecFromDisp", 199: "VarDecFromBool", 200: "GetErrorInfo", 201: "SetErrorInfo", 202: "CreateErrorInfo", 203: "VarDecRound", 204: "VarDecCmp", 205: "VarI2FromI1", 206: "VarI2FromUI2", 207: "VarI2FromUI4", 208: "VarI2FromDec", 209: "VarI4FromI1", 210: "VarI4FromUI2", 211: "VarI4FromUI4", 212: "VarI4FromDec", 213: "VarR4FromI1", 214: "VarR4FromUI2", 215: "VarR4FromUI4", 216: "VarR4FromDec", 217: "VarR8FromI1", 218: "VarR8FromUI2", 219: "VarR8FromUI4", 220: "VarR8FromDec", 221: "VarDateFromI1", 222: "VarDateFromUI2", 223: "VarDateFromUI4", 224: "VarDateFromDec", 225: "VarCyFromI1", 226: "VarCyFromUI2", 227: "VarCyFromUI4", 228: "VarCyFromDec", 229: "VarBstrFromI1", 230: "VarBstrFromUI2", 231: "VarBstrFromUI4", 232: "VarBstrFromDec", 233: "VarBoolFromI1", 234: "VarBoolFromUI2", 235: "VarBoolFromUI4", 236: "VarBoolFromDec", 237: "VarUI1FromI1", 238: "VarUI1FromUI2", 239: "VarUI1FromUI4", 240: "VarUI1FromDec", 241: "VarDecFromI1", 242: "VarDecFromUI2", 243: "VarDecFromUI4", 244: "VarI1FromUI1", 245: "VarI1FromI2", 246: "VarI1FromI4", 247: "VarI1FromR4", 248: "VarI1FromR8", 249: "VarI1FromDate", 250: "VarI1FromCy", 251: "VarI1FromStr", 252: "VarI1FromDisp", 253: "VarI1FromBool", 254: "VarI1FromUI2", 255: "VarI1FromUI4", 256: "VarI1FromDec", 257: "VarUI2FromUI1", 258: "VarUI2FromI2", 259: "VarUI2FromI4", 260: "VarUI2FromR4", 261: "VarUI2FromR8", 262: "VarUI2FromDate", 263: "VarUI2FromCy", 264: "VarUI2FromStr", 265: "VarUI2FromDisp", 266: "VarUI2FromBool", 267: "VarUI2FromI1", 268: "VarUI2FromUI4", 269: "VarUI2FromDec", 270: "VarUI4FromUI1", 271: "VarUI4FromI2", 272: "VarUI4FromI4", 273: "VarUI4FromR4", 
274: "VarUI4FromR8", 275: "VarUI4FromDate", 276: "VarUI4FromCy", 277: "VarUI4FromStr", 278: "VarUI4FromDisp", 279: "VarUI4FromBool", 280: "VarUI4FromI1", 281: "VarUI4FromUI2", 282: "VarUI4FromDec", 283: "BSTR_UserSize", 284: "BSTR_UserMarshal", 285: "BSTR_UserUnmarshal", 286: "BSTR_UserFree", 287: "VARIANT_UserSize", 288: "VARIANT_UserMarshal", 289: "VARIANT_UserUnmarshal", 290: "VARIANT_UserFree", 291: "LPSAFEARRAY_UserSize", 292: "LPSAFEARRAY_UserMarshal", 293: "LPSAFEARRAY_UserUnmarshal", 294: "LPSAFEARRAY_UserFree", 295: "LPSAFEARRAY_Size", 296: "LPSAFEARRAY_Marshal", 297: "LPSAFEARRAY_Unmarshal", 298: "VarDecCmpR8", 299: "VarCyAdd", 300: "DllUnregisterServer", 301: "OACreateTypeLib2", 303: "VarCyMul", 304: "VarCyMulI4", 305: "VarCySub", 306: "VarCyAbs", 307: "VarCyFix", 308: "VarCyInt", 309: "VarCyNeg", 310: "VarCyRound", 311: "VarCyCmp", 312: "VarCyCmpR8", 313: "VarBstrCat", 314: "VarBstrCmp", 315: "VarR8Pow", 316: "VarR4CmpR8", 317: "VarR8Round", 318: "VarCat", 319: "VarDateFromUdateEx", 322: "GetRecordInfoFromGuids", 323: "GetRecordInfoFromTypeInfo", 325: "SetVarConversionLocaleSetting", 326: "GetVarConversionLocaleSetting", 327: "SetOaNoCache", 329: "VarCyMulI8", 330: "VarDateFromUdate", 331: "VarUdateFromDate", 332: "GetAltMonthNames", 333: "VarI8FromUI1", 334: "VarI8FromI2", 335: "VarI8FromR4", 336: "VarI8FromR8", 337: "VarI8FromCy", 338: "VarI8FromDate", 339: "VarI8FromStr", 340: "VarI8FromDisp", 341: "VarI8FromBool", 342: "VarI8FromI1", 343: "VarI8FromUI2", 344: "VarI8FromUI4", 345: "VarI8FromDec", 346: "VarI2FromI8", 347: "VarI2FromUI8", 348: "VarI4FromI8", 349: "VarI4FromUI8", 360: "VarR4FromI8", 361: "VarR4FromUI8", 362: "VarR8FromI8", 363: "VarR8FromUI8", 364: "VarDateFromI8", 365: "VarDateFromUI8", 366: "VarCyFromI8", 367: "VarCyFromUI8", 368: "VarBstrFromI8", 369: "VarBstrFromUI8", 370: "VarBoolFromI8", 371: "VarBoolFromUI8", 372: "VarUI1FromI8", 373: "VarUI1FromUI8", 374: "VarDecFromI8", 375: "VarDecFromUI8", 376: "VarI1FromI8", 377: 
"VarI1FromUI8", 378: "VarUI2FromI8", 379: "VarUI2FromUI8", 401: "OleLoadPictureEx", 402: "OleLoadPictureFileEx", 411: "SafeArrayCreateVector", 412: "SafeArrayCopyData", 413: "VectorFromBstr", 414: "BstrFromVector", 415: "OleIconToCursor", 416: "OleCreatePropertyFrameIndirect", 417: "OleCreatePropertyFrame", 418: "OleLoadPicture", 419: "OleCreatePictureIndirect", 420: "OleCreateFontIndirect", 421: "OleTranslateColor", 422: "OleLoadPictureFile", 423: "OleSavePictureFile", 424: "OleLoadPicturePath", 425: "VarUI4FromI8", 426: "VarUI4FromUI8", 427: "VarI8FromUI8", 428: "VarUI8FromI8", 429: "VarUI8FromUI1", 430: "VarUI8FromI2", 431: "VarUI8FromR4", 432: "VarUI8FromR8", 433: "VarUI8FromCy", 434: "VarUI8FromDate", 435: "VarUI8FromStr", 436: "VarUI8FromDisp", 437: "VarUI8FromBool", 438: "VarUI8FromI1", 439: "VarUI8FromUI2", 440: "VarUI8FromUI4", 441: "VarUI8FromDec", 442: "RegisterTypeLibForUser", 443: "UnRegisterTypeLibForUser", } // OrdNames maps the dll names to ordinal names. var OrdNames = map[string]map[uint64]string{ "ws2_32.dll": WS232OrdNames, "wsock32.dll": WS232OrdNames, "oleaut32.dll": OleAut32OrdNames, } // OrdLookup returns API name given an ordinal. func OrdLookup(libname string, ord uint64, makeName bool) string { names, ok := OrdNames[strings.ToLower(libname)] if ok { if name, ok := names[ord]; ok { return name } } if makeName { return fmt.Sprintf("ord%d", ord) } return "" } ================================================ FILE: overlay.go ================================================ // Copyright 2022 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "errors" "io" ) // error var ( ErrNoOverlayFound = errors.New("pe does not have overlay data") ) // NewOverlayReader returns a new ReadSeeker reading the PE overlay data. 
func (pe *File) NewOverlayReader() (*io.SectionReader, error) { if pe.data == nil { return nil, errors.New("pe: file reader is nil") } return io.NewSectionReader(pe.f, pe.OverlayOffset, 1<<63-1), nil } // Overlay returns the overlay of the PE file. func (pe *File) Overlay() ([]byte, error) { sr, err := pe.NewOverlayReader() if err != nil { return nil, err } overlay := make([]byte, int64(pe.size)-pe.OverlayOffset) n, err := sr.ReadAt(overlay, 0) if n == len(overlay) { pe.HasOverlay = true err = nil } return overlay, err } func (pe *File) OverlayLength() int64 { return int64(pe.size) - pe.OverlayOffset } ================================================ FILE: overlay_test.go ================================================ package pe import ( "crypto/md5" "encoding/hex" "testing" ) type TestOverlay struct { overlayOffset int64 overlayLength int64 md5str string } var overlayTests = []struct { in string out TestOverlay }{ {getAbsoluteFilePath("test/putty.exe"), TestOverlay{ overlayOffset: 1163264, overlayLength: 15760, md5str: "1f46295a513e744895a6acf1029e136f", }}, } func TestFile_NewOverlayReader(t *testing.T) { for _, tt := range overlayTests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } if err := file.Parse(); err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } if file.OverlayOffset != tt.out.overlayOffset { t.Errorf("overlayLength failed, got %d, want %d", file.OverlayOffset, tt.out.overlayOffset) } overlayLength := file.OverlayLength() if overlayLength != tt.out.overlayLength { t.Errorf("overlayOffset failed, got %d, want %d", overlayLength, tt.out.overlayLength) } overlay, _ := file.Overlay() h := md5.New() h.Write(overlay) md5str := hex.EncodeToString(h.Sum(nil)) if md5str != tt.out.md5str { t.Errorf("overlayOffset failed, got %s, want %s", md5str, tt.out.md5str) } }) } } ================================================ FILE: pe.go 
================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe // Image executable types const ( // The DOS MZ executable format is the executable file format used // for .EXE files in DOS. ImageDOSSignature = 0x5A4D // MZ ImageDOSZMSignature = 0x4D5A // ZM // The New Executable (abbreviated NE or NewEXE) is a 16-bit .exe file // format, a successor to the DOS MZ executable format. It was used in // Windows 1.0–3.x, multitasking MS-DOS 4.0, OS/2 1.x, and the OS/2 subset // of Windows NT up to version 5.0 (Windows 2000). A NE is also called a // segmented executable. ImageOS2Signature = 0x454E // Linear Executable is an executable file format in the EXE family. // It was used by 32-bit OS/2, by some DOS extenders, and by Microsoft // Windows VxD files. It is an extension of MS-DOS EXE, and a successor // to NE (New Executable). ImageOS2LESignature = 0x454C // There are two main varieties of LE executables: // LX (32-bit), and LE (mixed 16/32-bit). ImageVXDSignature = 0x584C // Terse Executables have a 'VZ' signature. ImageTESignature = 0x5A56 // The Portable Executable (PE) format is a file format for executables, // object code, DLLs and others used in 32-bit and 64-bit versions of // Windows operating systems. 
ImageNTSignature = 0x00004550 // PE00 ) // Optional Header magic const ( ImageNtOptionalHeader32Magic = 0x10b ImageNtOptionalHeader64Magic = 0x20b ImageROMOptionalHeaderMagic = 0x10 ) // Image file machine types const ( ImageFileMachineUnknown = ImageFileHeaderMachineType(0x0) // The contents of this field are assumed to be applicable to any machine type ImageFileMachineAM33 = ImageFileHeaderMachineType(0x1d3) // Matsushita AM33 ImageFileMachineAMD64 = ImageFileHeaderMachineType(0x8664) // x64 ImageFileMachineARM = ImageFileHeaderMachineType(0x1c0) // ARM little endian ImageFileMachineARM64 = ImageFileHeaderMachineType(0xaa64) // ARM64 little endian ImageFileMachineARMNT = ImageFileHeaderMachineType(0x1c4) // ARM Thumb-2 little endian ImageFileMachineEBC = ImageFileHeaderMachineType(0xebc) // EFI byte code ImageFileMachineI386 = ImageFileHeaderMachineType(0x14c) // Intel 386 or later processors and compatible processors ImageFileMachineIA64 = ImageFileHeaderMachineType(0x200) // Intel Itanium processor family ImageFileMachineM32R = ImageFileHeaderMachineType(0x9041) // Mitsubishi M32R little endian ImageFileMachineMIPS16 = ImageFileHeaderMachineType(0x266) // MIPS16 ImageFileMachineMIPSFPU = ImageFileHeaderMachineType(0x366) // MIPS with FPU ImageFileMachineMIPSFPU16 = ImageFileHeaderMachineType(0x466) // MIPS16 with FPU ImageFileMachinePowerPC = ImageFileHeaderMachineType(0x1f0) // Power PC little endian ImageFileMachinePowerPCFP = ImageFileHeaderMachineType(0x1f1) // Power PC with floating point support ImageFileMachineR4000 = ImageFileHeaderMachineType(0x166) // MIPS little endian ImageFileMachineRISCV32 = ImageFileHeaderMachineType(0x5032) // RISC-V 32-bit address space ImageFileMachineRISCV64 = ImageFileHeaderMachineType(0x5064) // RISC-V 64-bit address space ImageFileMachineRISCV128 = ImageFileHeaderMachineType(0x5128) // RISC-V 128-bit address space ImageFileMachineSH3 = ImageFileHeaderMachineType(0x1a2) // Hitachi SH3 ImageFileMachineSH3DSP = 
	ImageFileHeaderMachineType(0x1a3) // Hitachi SH3 DSP
	ImageFileMachineSH4       = ImageFileHeaderMachineType(0x1a6) // Hitachi SH4
	ImageFileMachineSH5       = ImageFileHeaderMachineType(0x1a8) // Hitachi SH5
	ImageFileMachineTHUMB     = ImageFileHeaderMachineType(0x1c2) // Thumb
	ImageFileMachineWCEMIPSv2 = ImageFileHeaderMachineType(0x169) // MIPS little-endian WCE v2
)

// The Characteristics field contains flags that indicate attributes of the object or image file.
const (
	// Image file only. This flag indicates that the file contains no base
	// relocations and must be loaded at its preferred base address. In the
	// case of base address conflict, the OS loader reports an error. This flag
	// should not be set for managed PE files.
	ImageFileRelocsStripped = 0x0001

	// Flag indicates that the file is an image file (EXE or DLL). This flag
	// should be set for managed PE files. If it is not set, this generally
	// indicates a linker error (i.e. no unresolved external references).
	ImageFileExecutableImage = 0x0002

	// COFF line numbers have been removed. This flag should be set for managed
	// PE files because they do not use the debug information embedded in the
	// PE file itself. Instead, the debug information is saved in accompanying
	// program database (PDB) files.
	ImageFileLineNumsStripped = 0x0004

	// COFF symbol table entries for local symbols have been removed. This flag
	// should be set for managed PE files, for the reason given in the preceding
	// entry.
	ImageFileLocalSymsStripped = 0x0008

	// Aggressively trim the working set.
	ImageFileAggressiveWSTrim = 0x0010

	// Application can handle addresses beyond the 2GB range. This flag should
	// not be set for pure-IL managed PE files of versions 1.0 and 1.1 but can
	// be set for v2.0+ files.
	ImageFileLargeAddressAware = 0x0020

	// Little endian.
	ImageFileBytesReservedLow = 0x0080

	// Machine is based on 32-bit architecture. This flag is usually set by
	// the current versions of code generators producing managed PE files.
	// Version 2.0 and newer, however, can produce 64-bit specific images,
	// which don’t have this flag set.
	ImageFile32BitMachine = 0x0100

	// Debug information has been removed from the image file.
	ImageFileDebugStripped = 0x0200

	// If the image file is on removable media, copy and run it from the swap
	// file.
	ImageFileRemovableRunFromSwap = 0x0400

	// If the image file is on a network, copy and run it from the swap file.
	ImageFileNetRunFromSwap = 0x0800

	// The image file is a system file (for example, a device driver), not a
	// user program.
	ImageFileSystem = 0x1000

	// The image file is a DLL rather than an EXE. It cannot be directly run.
	ImageFileDLL = 0x2000

	// The image file should be run on a uniprocessor machine only.
	ImageFileUpSystemOnly = 0x4000

	// Big endian.
	ImageFileBytesReservedHigh = 0x8000
)

// Subsystem values of an OptionalHeader.
const (
	ImageSubsystemUnknown              = 0  // An unknown subsystem.
	ImageSubsystemNative               = 1  // Device drivers and native Windows processes.
	ImageSubsystemWindowsGUI           = 2  // The Windows graphical user interface (GUI) subsystem.
	ImageSubsystemWindowsCUI           = 3  // The Windows character subsystem.
	ImageSubsystemOS2CUI               = 5  // The OS/2 character subsystem.
	ImageSubsystemPosixCUI             = 7  // The Posix character subsystem.
	ImageSubsystemNativeWindows        = 8  // Native Win9x driver.
	ImageSubsystemWindowsCEGUI         = 9  // Windows CE.
	ImageSubsystemEFIApplication       = 10 // An Extensible Firmware Interface (EFI) application.
	ImageSubsystemEFIBootServiceDriver = 11 // An EFI driver with boot services.
	ImageSubsystemEFIRuntimeDriver     = 12 // An EFI driver with run-time services.
	ImageSubsystemEFIRom               = 13 // An EFI ROM image.
	ImageSubsystemXBOX                 = 14 // XBOX.
	ImageSubsystemWindowsBootApplication = 16 // Windows boot application.
)

// DllCharacteristics values of an OptionalHeader
const (
	ImageDllCharacteristicsReserved1 = 0x0001 // Reserved, must be zero.
	ImageDllCharacteristicsReserved2 = 0x0002 // Reserved, must be zero.
ImageDllCharacteristicsReserved4 = 0x0004 // Reserved, must be zero. ImageDllCharacteristicsReserved8 = 0x0008 // Reserved, must be zero. ImageDllCharacteristicsHighEntropyVA = 0x0020 // Image can handle a high entropy 64-bit virtual address space ImageDllCharacteristicsDynamicBase = 0x0040 // DLL can be relocated at load time. ImageDllCharacteristicsForceIntegrity = 0x0080 // Code Integrity checks are enforced. ImageDllCharacteristicsNXCompact = 0x0100 // Image is NX compatible. ImageDllCharacteristicsNoIsolation = 0x0200 // Isolation aware, but do not isolate the image. ImageDllCharacteristicsNoSEH = 0x0400 // Does not use structured exception (SE) handling. No SE handler may be called in this image. ImageDllCharacteristicsNoBind = 0x0800 // Do not bind the image. ImageDllCharacteristicsAppContainer = 0x1000 // Image must execute in an AppContainer ImageDllCharacteristicsWdmDriver = 0x2000 // A WDM driver. ImageDllCharacteristicsGuardCF = 0x4000 // Image supports Control Flow Guard. ImageDllCharacteristicsTerminalServiceAware = 0x8000 // Terminal Server aware. ) // ImageDirectoryEntry represents an entry inside the data directories. type ImageDirectoryEntry int // DataDirectory entries of an OptionalHeader const ( ImageDirectoryEntryExport ImageDirectoryEntry = iota // Export Table ImageDirectoryEntryImport // Import Table ImageDirectoryEntryResource // Resource Table ImageDirectoryEntryException // Exception Table ImageDirectoryEntryCertificate // Certificate Directory ImageDirectoryEntryBaseReloc // Base Relocation Table ImageDirectoryEntryDebug // Debug ImageDirectoryEntryArchitecture // Architecture Specific Data ImageDirectoryEntryGlobalPtr // The RVA of the value to be stored in the global pointer register. 
ImageDirectoryEntryTLS // The thread local storage (TLS) table ImageDirectoryEntryLoadConfig // The load configuration table ImageDirectoryEntryBoundImport // The bound import table ImageDirectoryEntryIAT // Import Address Table ImageDirectoryEntryDelayImport // Delay Import Descriptor ImageDirectoryEntryCLR // CLR Runtime Header ImageDirectoryEntryReserved // Must be zero ImageNumberOfDirectoryEntries // Tables count. ) // FileInfo represents the PE file information struct. type FileInfo struct { Is32 bool Is64 bool HasDOSHdr bool HasRichHdr bool HasCOFF bool HasNTHdr bool HasSections bool HasExport bool HasImport bool HasResource bool HasException bool HasCertificate bool HasReloc bool HasDebug bool HasArchitect bool HasGlobalPtr bool HasTLS bool HasLoadCFG bool HasBoundImp bool HasIAT bool HasDelayImp bool HasCLR bool HasOverlay bool IsSigned bool } ================================================ FILE: reloc.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" "errors" ) var ( // ErrInvalidBaseRelocVA is reposed when base reloc lies outside of the image. ErrInvalidBaseRelocVA = errors.New("invalid relocation information." + " Base Relocation VirtualAddress is outside of PE Image") // ErrInvalidBasicRelocSizeOfBloc is reposed when base reloc is too large. ErrInvalidBasicRelocSizeOfBloc = errors.New("invalid relocation " + "information. Base Relocation SizeOfBlock too large") ) // ImageBaseRelocationEntryType represents the type of an in image base relocation entry. type ImageBaseRelocationEntryType uint8 // The Type field of the relocation record indicates what kind of relocation // should be performed. Different relocation types are defined for each type // of machine. const ( // The base relocation is skipped. This type can be used to pad a block. 
ImageRelBasedAbsolute = 0 // The base relocation adds the high 16 bits of the difference to the 16-bit // field at offset. The 16-bit field represents the high value of a 32-bit word. ImageRelBasedHigh = 1 // The base relocation adds the low 16 bits of the difference to the 16-bit // field at offset. The 16-bit field represents the low half of a 32-bit word. ImageRelBasedLow = 2 // The base relocation applies all 32 bits of the difference to the 32-bit // field at offset. ImageRelBasedHighLow = 3 // The base relocation adds the high 16 bits of the difference to the 16-bit // field at offset. The 16-bit field represents the high value of a 32-bit // word. The low 16 bits of the 32-bit value are stored in the 16-bit word // that follows this base relocation. This means that this base relocation // occupies two slots. ImageRelBasedHighAdj = 4 // The relocation interpretation is dependent on the machine type. // When the machine type is MIPS, the base relocation applies to a MIPS jump // instruction. ImageRelBasedMIPSJmpAddr = 5 // This relocation is meaningful only when the machine type is ARM or Thumb. // The base relocation applies the 32-bit address of a symbol across a // consecutive MOVW/MOVT instruction pair. ImageRelBasedARMMov32 = 5 // This relocation is only meaningful when the machine type is RISC-V. The // base relocation applies to the high 20 bits of a 32-bit absolute address. ImageRelBasedRISCVHigh20 = 5 // Reserved, must be zero. ImageRelReserved = 6 // This relocation is meaningful only when the machine type is Thumb. // The base relocation applies the 32-bit address of a symbol to a // consecutive MOVW/MOVT instruction pair. ImageRelBasedThumbMov32 = 7 // This relocation is only meaningful when the machine type is RISC-V. // The base relocation applies to the low 12 bits of a 32-bit absolute // address formed in RISC-V I-type instruction format. ImageRelBasedRISCVLow12i = 7 // This relocation is only meaningful when the machine type is RISC-V. 
// The base relocation applies to the low 12 bits of a 32-bit absolute // address formed in RISC-V S-type instruction format. ImageRelBasedRISCVLow12s = 8 // The relocation is only meaningful when the machine type is MIPS. // The base relocation applies to a MIPS16 jump instruction. ImageRelBasedMIPSJmpAddr16 = 9 // The base relocation applies the difference to the 64-bit field at offset. ImageRelBasedDir64 = 10 ) const ( // MaxDefaultRelocEntriesCount represents the default maximum number of // relocations entries to parse. Some malware uses a fake huge reloc entries that // can slow significantly the parser. // Example: 01008963d32f5cc17b64c31446386ee5b36a7eab6761df87a2989ba9394d8f3d MaxDefaultRelocEntriesCount = 0x1000 ) // ImageBaseRelocation represents the IMAGE_BASE_RELOCATION structure. // Each chunk of base relocation data begins with an IMAGE_BASE_RELOCATION structure. type ImageBaseRelocation struct { // The image base plus the page RVA is added to each offset to create the // VA where the base relocation must be applied. VirtualAddress uint32 `json:"virtual_address"` // The total number of bytes in the base relocation block, including the // Page RVA and Block Size fields and the Type/Offset fields that follow. SizeOfBlock uint32 `json:"size_of_block"` } // ImageBaseRelocationEntry represents an image base relocation entry. type ImageBaseRelocationEntry struct { // Locate data that must be reallocated in buffer (data being an address // we use pointer of pointer). Data uint16 `json:"data"` // The offset of the relocation. This value plus the VirtualAddress // in IMAGE_BASE_RELOCATION is the complete RVA. Offset uint16 `json:"offset"` // A value that indicates the kind of relocation that should be performed. // Valid relocation types depend on machine type. Type ImageBaseRelocationEntryType `json:"type"` } // Relocation represents the relocation table which holds the data that needs to // be relocated. 
type Relocation struct { // Points to the ImageBaseRelocation structure. Data ImageBaseRelocation `json:"data"` // holds the list of entries for each chunk. Entries []ImageBaseRelocationEntry `json:"entries"` } func (pe *File) parseRelocations(dataRVA, rva, size uint32) ([]ImageBaseRelocationEntry, error) { var relocEntries []ImageBaseRelocationEntry relocEntriesCount := size / 2 if relocEntriesCount > pe.opts.MaxRelocEntriesCount { pe.Anomalies = append(pe.Anomalies, AnoAddressOfDataBeyondLimits) // Defense-in-depth: cap the iteration. A block with a genuinely huge // (but still smaller than SizeOfImage) SizeOfBlock would otherwise // have us decoding tens of thousands of meaningless WORDs as fake // entries — see MaxDefaultRelocEntriesCount above for the reference // malware sample that motivated this cap. relocEntriesCount = pe.opts.MaxRelocEntriesCount } offset := pe.GetOffsetFromRva(dataRVA) var err error for i := uint32(0); i < relocEntriesCount; i++ { entry := ImageBaseRelocationEntry{} entry.Data, err = pe.ReadUint16(offset + (i * 2)) if err != nil { break } entry.Type = ImageBaseRelocationEntryType(entry.Data >> 12) entry.Offset = entry.Data & 0x0fff relocEntries = append(relocEntries, entry) } return relocEntries, nil } func (pe *File) parseRelocDirectory(rva, size uint32) error { var sizeOfImage uint32 switch pe.Is64 { case true: sizeOfImage = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).SizeOfImage case false: sizeOfImage = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).SizeOfImage } relocSize := uint32(binary.Size(ImageBaseRelocation{})) end := rva + size for rva < end { baseReloc := ImageBaseRelocation{} offset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&baseReloc, offset, relocSize) if err != nil { return err } // Implicit end-of-table on a {VirtualAddress=0, SizeOfBlock=0} block. 
// // The PE/COFF spec does not define this sentinel — it states only that // the data-directory Size field bounds the table (see "The .reloc // Section (Image Only)" in the PE Format spec). In practice though, // many real binaries declare BaseReloc.Size larger than the actual // reloc data: e.g. when the .reloc section's VirtualSize exceeds its // RawSize, or when the linker rounds the directory to a section/page // boundary, the slack is zero-filled. Walking into that slack reads // {0, 0} — and continuing would (a) loop forever (zero size advances // rva by 0) and (b) make parseRelocations underflow on // SizeOfBlock - relocSize and synthesise millions of phantom entries // from whatever bytes happen to sit past the real reloc data. // // The Windows loader and every major PE parser (pefile, LIEF, ...) // treat {0, 0} as table termination for the same reasons; we follow // suit. The MaxRelocEntriesCount cap in parseRelocations remains as // the spec-strict backstop for blocks with a non-zero but bogus size. if baseReloc.SizeOfBlock == 0 { break } // Per the spec, Block Size is "the total number of bytes in the base // relocation block, including the Page RVA and Block Size fields" // — so the minimum legitimate value is the 8-byte header alone (zero // entries). Anything smaller is malformed and would underflow the // SizeOfBlock - relocSize calculation passed to parseRelocations. if baseReloc.SizeOfBlock < relocSize { return ErrInvalidBasicRelocSizeOfBloc } // VirtualAddress must lie within the Image. if baseReloc.VirtualAddress > sizeOfImage { return ErrInvalidBaseRelocVA } // SizeOfBlock must be less or equal than the size of the image. // It's a rather loose sanity test. 
if baseReloc.SizeOfBlock > sizeOfImage { return ErrInvalidBasicRelocSizeOfBloc } relocEntries, err := pe.parseRelocations(rva+relocSize, baseReloc.VirtualAddress, baseReloc.SizeOfBlock-relocSize) if err != nil { return err } pe.Relocations = append(pe.Relocations, Relocation{ Data: baseReloc, Entries: relocEntries, }) rva += baseReloc.SizeOfBlock } if len(pe.Relocations) > 0 { pe.HasReloc = true } return nil } // String returns the string representation of the `Type` field of a base reloc entry. func (t ImageBaseRelocationEntryType) String(pe *File) string { relocTypesMap := map[ImageBaseRelocationEntryType]string{ ImageRelBasedAbsolute: "Absolute", ImageRelBasedHigh: "High", ImageRelBasedLow: "Low", ImageRelBasedHighLow: "HighLow", ImageRelBasedHighAdj: "HighAdj", ImageRelReserved: "Reserved", ImageRelBasedRISCVLow12s: "RISC-V Low12s", ImageRelBasedMIPSJmpAddr16: "MIPS Jmp Addr16", ImageRelBasedDir64: "DIR64", } if value, ok := relocTypesMap[t]; ok { return value } switch pe.NtHeader.FileHeader.Machine { case ImageFileMachineMIPS16, ImageFileMachineMIPSFPU, ImageFileMachineMIPSFPU16, ImageFileMachineWCEMIPSv2: if t == ImageRelBasedMIPSJmpAddr { return "MIPS JMP Addr" } case ImageFileMachineARM, ImageFileMachineARM64, ImageFileMachineARMNT: if t == ImageRelBasedARMMov32 { return "ARM MOV 32" } if t == ImageRelBasedThumbMov32 { return "Thumb MOV 32" } case ImageFileMachineRISCV32, ImageFileMachineRISCV64, ImageFileMachineRISCV128: if t == ImageRelBasedRISCVHigh20 { return "RISC-V High 20" } if t == ImageRelBasedRISCVLow12i { return "RISC-V Low 12" } } return "?" } ================================================ FILE: reloc_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"testing"
)

// TestParseRelocDirectoryData checks block-level parsing of the base
// relocation directory against known values from a real sample.
func TestParseRelocDirectoryData(t *testing.T) {
	type TestRelocData struct {
		imgBaseRelocation ImageBaseRelocation
		relocEntriesCount int
		relocDataIndex    int
	}

	tests := []struct {
		in  string
		out TestRelocData
	}{
		{
			getAbsoluteFilePath("test/putty.exe"),
			TestRelocData{
				imgBaseRelocation: ImageBaseRelocation{
					VirtualAddress: 0xd8000, SizeOfBlock: 0xc},
				relocEntriesCount: 18,
				relocDataIndex:    17,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			// Fast mode is enough: the reloc directory is parsed explicitly
			// below via parseRelocDirectory.
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			// Locate the base relocation data directory entry in the
			// 32/64-bit optional header.
			var va, size uint32
			switch file.Is64 {
			case true:
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryBaseReloc]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			case false:
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryBaseReloc]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}

			err = file.parseRelocDirectory(va, size)
			if err != nil {
				t.Fatalf("parseRelocDirectory(%s) failed, reason: %v", tt.in, err)
			}

			relocs := file.Relocations
			if len(relocs) != tt.out.relocEntriesCount {
				t.Errorf("relocations entries count assertion failed, got %v, want %v",
					len(relocs), tt.out.relocEntriesCount)
			}

			imgBaseRelocation := relocs[tt.out.relocDataIndex].Data
			if imgBaseRelocation != tt.out.imgBaseRelocation {
				t.Errorf("reloc data assertion failed, got %v, want %v",
					imgBaseRelocation, tt.out.imgBaseRelocation)
			}
		})
	}
}

// TestParseRelocDirectoryZeroSizeOfBlock exercises the end-of-table sentinel
// (VirtualAddress=0, SizeOfBlock=0). Before the fix, the sentinel was handed
// to parseRelocations unchanged; SizeOfBlock - relocSize underflowed (uint32)
// and the parser synthesised millions of phantom entries from bytes past the
// real reloc table, ballooning the marshalled output to ~154 MB.
//
// The sample below is a real PE with 14 legitimate relocation blocks followed
// by the {0,0} sentinel. We assert that:
//   - the sentinel block is NOT appended to pe.Relocations
//   - the total number of parsed entries stays bounded (3558 real entries)
//   - the last real block matches the expected header
func TestParseRelocDirectoryZeroSizeOfBlock(t *testing.T) {
	in := getAbsoluteFilePath(
		"test/05df99cc2e77a59aa3443cae13325af553271bddaeedff3c08bf4f6995bbc62d")
	ops := Options{Fast: true}
	file, err := New(in, &ops)
	if err != nil {
		t.Fatalf("New(%s) failed, reason: %v", in, err)
	}
	if err := file.Parse(); err != nil {
		t.Fatalf("Parse(%s) failed, reason: %v", in, err)
	}

	var va, size uint32
	switch file.Is64 {
	case true:
		dirEntry := file.NtHeader.OptionalHeader.(ImageOptionalHeader64).
			DataDirectory[ImageDirectoryEntryBaseReloc]
		va, size = dirEntry.VirtualAddress, dirEntry.Size
	case false:
		dirEntry := file.NtHeader.OptionalHeader.(ImageOptionalHeader32).
			DataDirectory[ImageDirectoryEntryBaseReloc]
		va, size = dirEntry.VirtualAddress, dirEntry.Size
	}

	if err := file.parseRelocDirectory(va, size); err != nil {
		t.Fatalf("parseRelocDirectory(%s) failed, reason: %v", in, err)
	}

	// Exactly 14 real blocks — the {0,0} sentinel must not be appended.
	if got, want := len(file.Relocations), 14; got != want {
		t.Fatalf("relocation block count: got %d, want %d", got, want)
	}

	// No block should carry a zero SizeOfBlock — if one does, the sentinel
	// slipped through.
	for i, r := range file.Relocations {
		if r.Data.SizeOfBlock == 0 {
			t.Errorf("block %d has SizeOfBlock=0 (sentinel leaked into result)", i)
		}
	}

	// Total entries across all blocks must be bounded (pre-fix: 4,270,384).
	total := 0
	for _, r := range file.Relocations {
		total += len(r.Entries)
	}
	if total != 3558 {
		t.Errorf("total relocation entries: got %d, want 3558", total)
	}

	// The last legitimate block.
	last := file.Relocations[13]
	wantLast := ImageBaseRelocation{VirtualAddress: 0x466000, SizeOfBlock: 20}
	if last.Data != wantLast {
		t.Errorf("last block header: got %+v, want %+v", last.Data, wantLast)
	}
	if len(last.Entries) != 6 {
		t.Errorf("last block entry count: got %d, want 6", len(last.Entries))
	}
}

// TestParseRelocDirectoryEntry checks entry-level decoding (Data/Offset/Type)
// and the human-readable type names, including machine-dependent ones.
func TestParseRelocDirectoryEntry(t *testing.T) {
	type TestRelocEntry struct {
		imgBaseRelocationEntry ImageBaseRelocationEntry
		relocEntriesCount      int
		relocDataIndex         int
		relocEntryIndex        int
		relocTypeMeaning       string
	}

	tests := []struct {
		in  string
		out TestRelocEntry
	}{
		{
			getAbsoluteFilePath("test/putty.exe"),
			TestRelocEntry{
				imgBaseRelocationEntry: ImageBaseRelocationEntry{
					Data:   0xab00,
					Offset: 0xb00,
					Type:   0xa,
				},
				relocDataIndex:    0x1,
				relocEntriesCount: 154,
				relocEntryIndex:   17,
				relocTypeMeaning:  "DIR64",
			},
		},
		{
			getAbsoluteFilePath("test/arp.dll"),
			TestRelocEntry{
				imgBaseRelocationEntry: ImageBaseRelocationEntry{
					Data:   0x8004,
					Offset: 0x4,
					Type:   0x8,
				},
				relocDataIndex:    3,
				relocEntriesCount: 204,
				relocEntryIndex:   1,
				relocTypeMeaning:  "RISC-V Low12s",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			var va, size uint32
			switch file.Is64 {
			case true:
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryBaseReloc]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			case false:
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryBaseReloc]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}

			err = file.parseRelocDirectory(va, size)
			if err != nil {
				t.Fatalf("parseRelocDirectory(%s) failed, reason: %v", tt.in, err)
			}

			reloc := file.Relocations[tt.out.relocDataIndex]
			if len(reloc.Entries) != tt.out.relocEntriesCount {
				t.Errorf("relocations entries count assertion failed, got %v, want %v",
					len(reloc.Entries), tt.out.relocEntriesCount)
			}

			relocEntry := reloc.Entries[tt.out.relocEntryIndex]
			if relocEntry != tt.out.imgBaseRelocationEntry {
				t.Errorf("reloc image base relocation entry assertion failed, got %v, want %v",
					relocEntry, tt.out.imgBaseRelocationEntry)
			}

			relocType := relocEntry.Type.String(file)
			if relocType != tt.out.relocTypeMeaning {
				t.Errorf("pretty reloc type assertion failed, got %v, want %v",
					relocType, tt.out.relocTypeMeaning)
			}
		})
	}
}

================================================
FILE: resource.go
================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

import (
	"encoding/binary"
)

// ResourceType represents a resource type.
type ResourceType int

// ResourceLang represents a resource language.
type ResourceLang uint32

// ResourceSubLang represents a resource sub language.
type ResourceSubLang uint32

// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-lcid/70feba9f-294e-491e-b6eb-56532684c37f
// Special resource (sub)language identifiers.
const ( LangNeutral ResourceLang = 0x00 // Default custom (MUI) locale language LangUserDefault ResourceLang = 0x01 // User default locale language LangSystemDefault ResourceLang = 0x02 // System default locale language LangInvariant ResourceLang = 0x7F // Invariant locale language SubLangNeutral ResourceSubLang = 0x00 // Neutral sub-language SubLangInvariant ResourceSubLang = 0x00 // Invariant sub-language SubLangDefault ResourceSubLang = 0x01 // User default sub-language SubLangSysDefault ResourceSubLang = 0x02 // System default sub-language SubLangCustomDefault ResourceSubLang = 0x03 // Default custom sub-language SubLangCustomUnspecified ResourceSubLang = 0x04 // Unspecified custom sub-language SubLangMUICustomDefault ResourceSubLang = 0x05 // Default custom MUI sub-language ) // All resource language identifiers. const ( // Afrikaans (af) LangAfrikaans ResourceLang = 0x0036 // Albanian (sq) LangAlbanian ResourceLang = 0x001C // Alsatian (gsw) LangAlsatian ResourceLang = 0x0084 // Amharic (am) LangAmharic ResourceLang = 0x005E // Arabic (ar) LangArabic ResourceLang = 0x0001 // Armenian (hy) LangArmenian ResourceLang = 0x002B // Assamese (as) LangAssamese ResourceLang = 0x004D // Azerbaijani (Latin) (az) LangAzerbaijaniLatin ResourceLang = 0x002C // Bangla (bn) LangBangla ResourceLang = 0x0045 // Bashkir (ba) LangBashkir ResourceLang = 0x006D // Basque (eu) LangBasque ResourceLang = 0x002D // Belarusian (be) LangBelarusian ResourceLang = 0x0023 // Bosnian (Latin) (bs) LangBosnianLatin ResourceLang = 0x781A // Breton (br) LangBreton ResourceLang = 0x007E // Bulgarian (bg) LangBulgarian ResourceLang = 0x0002 // Burmese (my) LangBurmese ResourceLang = 0x0055 // Catalan (ca) LangCatalan ResourceLang = 0x0003 // Central Kurdish (ku) LangCentralKurdish ResourceLang = 0x0092 // Cherokee (chr) LangCherokee ResourceLang = 0x005C // Chinese (Simplified) (zh) LangChineseSimplified ResourceLang = 0x7804 // Corsican (co) LangCorsican ResourceLang = 0x0083 // Croatian (hr) 
LangCroatian ResourceLang = 0x001A // Czech (cs) LangCzech ResourceLang = 0x0005 // Danish (da) LangDanish ResourceLang = 0x0006 // Dari (prs) LangDari ResourceLang = 0x008C // Divehi (dv) LangDivehi ResourceLang = 0x0065 // Dutch (nl) LangDutch ResourceLang = 0x0013 // English (en) LangEnglish ResourceLang = 0x0009 // Estonian (et) LangEstonian ResourceLang = 0x0025 // Faroese (fo) LangFaroese ResourceLang = 0x0038 // Filipino (fil) LangFilipino ResourceLang = 0x0064 // Finnish (fi) LangFinnish ResourceLang = 0x000B // French (fr) LangFrench ResourceLang = 0x000C // Frisian (fy) LangFrisian ResourceLang = 0x0062 // Fulah (ff) LangFulah ResourceLang = 0x0067 // Fulah (Latin) (ff-Latn) LangFulahLatin ResourceLang = 0x7C67 // Galician (gl) LangGalician ResourceLang = 0x0056 // Georgian (ka) LangGeorgian ResourceLang = 0x0037 // German (de) LangGerman ResourceLang = 0x0007 // Greek (el) LangGreek ResourceLang = 0x0008 // Greenlandic (kl) LangGreenlandic ResourceLang = 0x006F // Guarani (gn) LangGuarani ResourceLang = 0x0074 // Gujarati (gu) LangGujarati ResourceLang = 0x0047 // Hausa (Latin) (ha) LangHausaLatin ResourceLang = 0x0068 // Hawaiian (haw) LangHawaiian ResourceLang = 0x0075 // Hebrew (he) LangHebrew ResourceLang = 0x000D // Hindi (hi) LangHindi ResourceLang = 0x0039 // Hungarian (hu) LangHungarian ResourceLang = 0x000E // Icelandic (is) LangIcelandic ResourceLang = 0x000F // Igbo (ig) LangIgbo ResourceLang = 0x0070 // Indonesian (id) LangIndonesian ResourceLang = 0x0021 // Inuktitut (Latin) (iu) LangInuktitutLatin ResourceLang = 0x005D // Irish (ga) LangIrish ResourceLang = 0x003C // Italian (it) LangItalian ResourceLang = 0x0010 // Japanese (ja) LangJapanese ResourceLang = 0x0011 // Kannada (kn) LangKannada ResourceLang = 0x004B // Kashmiri (ks) LangKashmiri ResourceLang = 0x0060 // Kazakh (kk) LangKazakh ResourceLang = 0x003F // Khmer (km) LangKhmer ResourceLang = 0x0053 // K'iche (quc) LangKiche ResourceLang = 0x0086 // Kinyarwanda (rw) LangKinyarwanda 
ResourceLang = 0x0087 // Kiswahili (sw) LangKiswahili ResourceLang = 0x0041 // Konkani (kok) LangKonkani ResourceLang = 0x0057 // Korean (ko) LangKorean ResourceLang = 0x0012 // Kyrgyz (ky) LangKyrgyz ResourceLang = 0x0040 // Lao (lo) LangLao ResourceLang = 0x0054 // Latvian (lv) LangLatvian ResourceLang = 0x0026 // Lithuanian (lt) LangLithuanian ResourceLang = 0x0027 // Lower Sorbian (dsb) LangLowerSorbian ResourceLang = 0x7C2E // Luxembourgish (lb) LangLuxembourgish ResourceLang = 0x006E // Macedonian (mk) LangMacedonian ResourceLang = 0x002F // Malay (ms) LangMalay ResourceLang = 0x003E // Malayalam (ml) LangMalayalam ResourceLang = 0x004C // Maltese (mt) LangMaltese ResourceLang = 0x003A // Maori (mi) LangMaori ResourceLang = 0x0081 // Mapudungun (arn) LangMapudungun ResourceLang = 0x007A // Marathi (mr) LangMarathi ResourceLang = 0x004E // Mohawk (moh) LangMohawk ResourceLang = 0x007C // Mongolian (Cyrillic) (mn) LangMongolianCyrillic ResourceLang = 0x0050 // Nepali (ne) LangNepali ResourceLang = 0x0061 // Norwegian (Bokmal) (no) LangNorwegianBokmalNo ResourceLang = 0x0014 // Norwegian (Bokmal) (nb) LangNorwegianBokmal ResourceLang = 0x7C14 // Norwegian (Nynorsk) (nn) LangNorwegianNynorsk ResourceLang = 0x7814 // Occitan (oc) LangOccitan ResourceLang = 0x0082 // Odia (or) LangOdia ResourceLang = 0x0048 // Oromo (om) LangOromo ResourceLang = 0x0072 // Pashto (ps) LangPashto ResourceLang = 0x0063 // Persian (fa) LangPersian ResourceLang = 0x0029 // Polish (pl) LangPolish ResourceLang = 0x0015 // Portuguese (pt) LangPortuguese ResourceLang = 0x0016 // Punjabi (pa) LangPunjabi ResourceLang = 0x0046 // Quechua (quz) LangQuechua ResourceLang = 0x006B // Romanian (ro) LangRomanian ResourceLang = 0x0018 // Romansh (rm) LangRomansh ResourceLang = 0x0017 // Russian (ru) LangRussian ResourceLang = 0x0019 // Sakha (sah) LangSakha ResourceLang = 0x0085 // Sami (Inari) (smn) LangSamiInari ResourceLang = 0x703B // Sami (Lule) (smj) LangSamiLule ResourceLang = 0x7C3B // Sami 
(Northern) (se) LangSamiNorthern ResourceLang = 0x003B // Sami (Skolt) (sms) LangSamiSkolt ResourceLang = 0x743B // Sami (Southern) (sma) LangSamiSouthern ResourceLang = 0x783B // Sanskrit (sa) LangSanskrit ResourceLang = 0x004F // Scottish Gaelic (gd) LangScottishGaelic ResourceLang = 0x0091 // Serbian (Latin) (sr) LangSerbianLatin ResourceLang = 0x7C1A // Sesotho Sa Leboa (nso) LangSesothoSaLeboa ResourceLang = 0x006C // Setswana (tn) LangSetswana ResourceLang = 0x0032 // Sindhi (sd) LangSindhi ResourceLang = 0x0059 // Sinhala (si) LangSinhala ResourceLang = 0x005B // Slovak (sk) LangSlovak ResourceLang = 0x001B // Slovenian (sl) LangSlovenian ResourceLang = 0x0024 // Somali (so) LangSomali ResourceLang = 0x0077 // Sotho (st) LangSotho ResourceLang = 0x0030 // Spanish (es) LangSpanish ResourceLang = 0x000A // Swedish (sv) LangSwedish ResourceLang = 0x001D // Syriac (syr) LangSyriac ResourceLang = 0x005A // Tajik (Cyrillic) (tg) LangTajikCyrillic ResourceLang = 0x0028 // Tamazight (Latin) (tzm) LangTamazightLatin ResourceLang = 0x005F // Tamil (ta) LangTamil ResourceLang = 0x0049 // Tatar (tt) LangTatar ResourceLang = 0x0044 // Telugu (te) LangTelugu ResourceLang = 0x004A // Thai (th) LangThai ResourceLang = 0x001E // Tibetan (bo) LangTibetan ResourceLang = 0x0051 // Tigrinya (ti) LangTigrinya ResourceLang = 0x0073 // Tsonga (ts) LangTsonga ResourceLang = 0x0031 // Turkish (tr) LangTurkish ResourceLang = 0x001F // Turkmen (tk) LangTurkmen ResourceLang = 0x0042 // Ukrainian (uk) LangUkrainian ResourceLang = 0x0022 // Upper Sorbian (hsb) LangUpperSorbian ResourceLang = 0x002E // Urdu (ur) LangUrdu ResourceLang = 0x0020 // Uyghur (ug) LangUyghur ResourceLang = 0x0080 // Uzbek (Latin) (uz) LangUzbekLatin ResourceLang = 0x0043 // Venda (ve) LangVenda ResourceLang = 0x0033 // Vietnamese (vi) LangVietnamese ResourceLang = 0x002A // Welsh (cy) LangWelsh ResourceLang = 0x0052 // Wolof (wo) LangWolof ResourceLang = 0x0088 // Xhosa (xh) LangXhosa ResourceLang = 0x0034 // Yi 
(ii) LangYi ResourceLang = 0x0078 // Yoruba (yo) LangYoruba ResourceLang = 0x006A // Zulu (zu) LangZulu ResourceLang = 0x0035 ) // All resource sub-language identifiers. const ( // Afrikaans South Africa (af-ZA) SubLangAfrikaansSouthAfrica ResourceSubLang = iota // Albanian Albania (sq-AL) SubLangAlbanianAlbania // Alsatian France (gsw-FR) SubLangAlsatianFrance // Amharic Ethiopia (am-ET) SubLangAmharicEthiopia // Arabic Algeria (ar-DZ) SubLangArabicAlgeria // Arabic Bahrain (ar-BH) SubLangArabicBahrain // Arabic Egypt (ar-EG) SubLangArabicEgypt // Arabic Iraq (ar-IQ) SubLangArabicIraq // Arabic Jordan (ar-JO) SubLangArabicJordan // Arabic Kuwait (ar-KW) SubLangArabicKuwait // Arabic Lebanon (ar-LB) SubLangArabicLebanon // Arabic Libya (ar-LY) SubLangArabicLibya // Arabic Morocco (ar-MA) SubLangArabicMorocco // Arabic Oman (ar-OM) SubLangArabicOman // Arabic Qatar (ar-QA) SubLangArabicQatar // Arabic Saudi Arabia (ar-SA) SubLangArabicSaudiArabia // Arabic Syria (ar-SY) SubLangArabicSyria // Arabic Tunisia (ar-TN) SubLangArabicTunisia // Arabic U.a.e. 
(ar-AE) SubLangArabicUae // Arabic Yemen (ar-YE) SubLangArabicYemen // Armenian Armenia (hy-AM) SubLangArmenianArmenia // Assamese India (as-IN) SubLangAssameseIndia // Azerbaijani (Cyrillic) (az-Cyrl) SubLangAzerbaijaniCyrillic // Azerbaijani (Cyrillic) Azerbaijan (az-Cyrl-AZ) SubLangAzerbaijaniCyrillicAzerbaijan // Azerbaijani (Latin) (az-Latn) SubLangAzerbaijaniLatin // Azerbaijani (Latin) Azerbaijan (az-Latn-AZ) SubLangAzerbaijaniLatinAzerbaijan // Bangla Bangladesh (bn-BD) SubLangBanglaBangladesh // Bangla India (bn-IN) SubLangBanglaIndia // Bashkir Russia (ba-RU) SubLangBashkirRussia // Basque Spain (eu-ES) SubLangBasqueSpain // Belarusian Belarus (be-BY) SubLangBelarusianBelarus // Bosnian (Cyrillic) (bs-Cyrl) SubLangBosnianCyrillic // Bosnian (Cyrillic) Bosnia And Herzegovina (bs-Cyrl-BA) SubLangBosnianCyrillicBosniaAndHerzegovina // Bosnian (Latin) (bs-Latn) SubLangBosnianLatin // Bosnian (Latin) Bosnia And Herzegovina (bs-Latn-BA) SubLangBosnianLatinBosniaAndHerzegovina // Breton France (br-FR) SubLangBretonFrance // Bulgarian Bulgaria (bg-BG) SubLangBulgarianBulgaria // Burmese Myanmar (my-MM) SubLangBurmeseMyanmar // Catalan Spain (ca-ES) SubLangCatalanSpain // Central Atlas Tamazight (Arabic) Morocco (tzm-ArabMA) SubLangCentralAtlasTamazightArabicMorocco // Central Kurdish (ku-Arab) SubLangCentralKurdish // Central Kurdish Iraq (ku-Arab-IQ) SubLangCentralKurdishIraq // Cherokee (chr-Cher) SubLangCherokee // Cherokee United States (chr-Cher-US) SubLangCherokeeUnitedStates // Chinese (Simplified) (zh-Hans) SubLangChineseSimplified // Chinese (Simplified) People's Republic Of China (zh-CN) SubLangChineseSimplifiedPeoplesRepublicOfChina // Chinese (Simplified) Singapore (zh-SG) SubLangChineseSimplifiedSingapore // Chinese (Traditional) (zh-Hant) SubLangChineseTraditional // Chinese (Traditional) Hong Kong S.a.r. (zh-HK) SubLangChineseTraditionalHongKongSar // Chinese (Traditional) Macao S.a.r. 
(zh-MO) SubLangChineseTraditionalMacaoSar // Chinese (Traditional) Taiwan (zh-TW) SubLangChineseTraditionalTaiwan // Corsican France (co-FR) SubLangCorsicanFrance // Croatian Croatia (hr-HR) SubLangCroatianCroatia // Croatian (Latin) Bosnia And Herzegovina (hr-BA) SubLangCroatianLatinBosniaAndHerzegovina // Czech Czech Republic (cs-CZ) SubLangCzechCzechRepublic // Danish Denmark (da-DK) SubLangDanishDenmark // Dari Afghanistan (prs-AF) SubLangDariAfghanistan // Divehi Maldives (dv-MV) SubLangDivehiMaldives // Dutch Belgium (nl-BE) SubLangDutchBelgium // Dutch Netherlands (nl-NL) SubLangDutchNetherlands // Dzongkha Bhutan (dz-BT) SubLangDzongkhaBhutan // English Australia (en-AU) SubLangEnglishAustralia // English Belize (en-BZ) SubLangEnglishBelize // English Canada (en-CA) SubLangEnglishCanada // English Caribbean (en-029) SubLangEnglishCaribbean // English Hong Kong (en-HK) SubLangEnglishHongKong // English India (en-IN) SubLangEnglishIndia // English Ireland (en-IE) SubLangEnglishIreland // English Jamaica (en-JM) SubLangEnglishJamaica // English Malaysia (en-MY) SubLangEnglishMalaysia // English New Zealand (en-NZ) SubLangEnglishNewZealand // English Republic Of The Philippines (en-PH) SubLangEnglishRepublicOfThePhilippines // English Singapore (en-SG) SubLangEnglishSingapore // English South Africa (en-ZA) SubLangEnglishSouthAfrica // English Trinidad And Tobago (en-TT) SubLangEnglishTrinidadAndTobago // English United Arab Emirates (en-AE) SubLangEnglishUnitedArabEmirates // English United Kingdom (en-GB) SubLangEnglishUnitedKingdom // English United States (en-US) SubLangEnglishUnitedStates // English Zimbabwe (en-ZW) SubLangEnglishZimbabwe // Estonian Estonia (et-EE) SubLangEstonianEstonia // Faroese Faroe Islands (fo-FO) SubLangFaroeseFaroeIslands // Filipino Philippines (fil-PH) SubLangFilipinoPhilippines // Finnish Finland (fi-FI) SubLangFinnishFinland // French Belgium (fr-BE) SubLangFrenchBelgium // French Cameroon (fr-CM) SubLangFrenchCameroon // 
French Canada (fr-CA) SubLangFrenchCanada // French Caribbean (fr-029) SubLangFrenchCaribbean // French Congo, Drc (fr-CD) SubLangFrenchCongoDrc // French Côte D'ivoire (fr-CI) SubLangFrenchCôteDivoire // French France (fr-FR) SubLangFrenchFrance // French Haiti (fr-HT) SubLangFrenchHaiti // French Luxembourg (fr-LU) SubLangFrenchLuxembourg // French Mali (fr-ML) SubLangFrenchMali // French Morocco (fr-MA) SubLangFrenchMorocco // French Principality Of Monaco (fr-MC) SubLangFrenchPrincipalityOfMonaco // French Reunion (fr-RE) SubLangFrenchReunion // French Senegal (fr-SN) SubLangFrenchSenegal // French Switzerland (fr-CH) SubLangFrenchSwitzerland // Frisian Netherlands (fy-NL) SubLangFrisianNetherlands // Fulah Nigeria (ff-NG) SubLangFulahNigeria // Fulah (Latin) Nigeria (ff-Latn-NG) SubLangFulahLatinNigeria // Fulah Senegal (ff-Latn-SN) SubLangFulahSenegal // Galician Spain (gl-ES) SubLangGalicianSpain // Georgian Georgia (ka-GE) SubLangGeorgianGeorgia // German Austria (de-AT) SubLangGermanAustria // German Germany (de-DE) SubLangGermanGermany // German Liechtenstein (de-LI) SubLangGermanLiechtenstein // German Luxembourg (de-LU) SubLangGermanLuxembourg // German Switzerland (de-CH) SubLangGermanSwitzerland // Greek Greece (el-GR) SubLangGreekGreece // Greenlandic Greenland (kl-GL) SubLangGreenlandicGreenland // Guarani Paraguay (gn-PY) SubLangGuaraniParaguay // Gujarati India (gu-IN) SubLangGujaratiIndia // Hausa (Latin) (ha-Latn) SubLangHausaLatin // Hausa (Latin) Nigeria (ha-Latn-NG) SubLangHausaLatinNigeria // Hawaiian United States (haw-US) SubLangHawaiianUnitedStates // Hebrew Israel (he-IL) SubLangHebrewIsrael // Hindi India (hi-IN) SubLangHindiIndia // Hungarian Hungary (hu-HU) SubLangHungarianHungary // Icelandic Iceland (is-IS) SubLangIcelandicIceland // Igbo Nigeria (ig-NG) SubLangIgboNigeria // Indonesian Indonesia (id-ID) SubLangIndonesianIndonesia // Inuktitut (Latin) (iu-Latn) SubLangInuktitutLatin // Inuktitut (Latin) Canada (iu-Latn-CA) 
SubLangInuktitutLatinCanada // Inuktitut (Syllabics) (iu-Cans) SubLangInuktitutSyllabics // Inuktitut (Syllabics) Canada (iu-Cans-CA) SubLangInuktitutSyllabicsCanada // Irish Ireland (ga-IE) SubLangIrishIreland // Italian Italy (it-IT) SubLangItalianItaly // Italian Switzerland (it-CH) SubLangItalianSwitzerland // Japanese Japan (ja-JP) SubLangJapaneseJapan // Kannada India (kn-IN) SubLangKannadaIndia // Kanuri (Latin) Nigeria (kr-Latn-NG) SubLangKanuriLatinNigeria // Kashmiri Perso-Arabic (ks-Arab) SubLangKashmiriPersoArabic // Kashmiri (Devanagari) India (ks-Deva-IN) SubLangKashmiriDevanagariIndia // Kazakh Kazakhstan (kk-KZ) SubLangKazakhKazakhstan // Khmer Cambodia (km-KH) SubLangKhmerCambodia // K'iche Guatemala (quc-Latn-GT) SubLangKicheGuatemala // Kinyarwanda Rwanda (rw-RW) SubLangKinyarwandaRwanda // Kiswahili Kenya (sw-KE) SubLangKiswahiliKenya // Konkani India (kok-IN) SubLangKonkaniIndia // Korean Korea (ko-KR) SubLangKoreanKorea // Kyrgyz Kyrgyzstan (ky-KG) SubLangKyrgyzKyrgyzstan // Lao Lao P.d.r. 
(lo-LA) SubLangLaoLaoPdr // Latin Vatican City (la-VA) SubLangLatinVaticanCity // Latvian Latvia (lv-LV) SubLangLatvianLatvia // Lithuanian Lithuania (lt-LT) SubLangLithuanianLithuania // Lower Sorbian Germany (dsb-DE) SubLangLowerSorbianGermany // Luxembourgish Luxembourg (lb-LU) SubLangLuxembourgishLuxembourg // Macedonian North Macedonia (mk-MK) SubLangMacedonianNorthMacedonia // Malay Brunei Darussalam (ms-BN) SubLangMalayBruneiDarussalam // Malay Malaysia (ms-MY) SubLangMalayMalaysia // Malayalam India (ml-IN) SubLangMalayalamIndia // Maltese Malta (mt-MT) SubLangMalteseMalta // Maori New Zealand (mi-NZ) SubLangMaoriNewZealand // Mapudungun Chile (arn-CL) SubLangMapudungunChile // Marathi India (mr-IN) SubLangMarathiIndia // Mohawk Canada (moh-CA) SubLangMohawkCanada // Mongolian (Cyrillic) (mn-Cyrl) SubLangMongolianCyrillic // Mongolian (Cyrillic) Mongolia (mn-MN) SubLangMongolianCyrillicMongolia // Mongolian (Traditional Mongolian) (mn-Mong) SubLangMongolianTraditionalMongolian // Mongolian (Traditional Mongolian) People's Republic Of China (mn-MongCN) SubLangMongolianTraditionalMongolianPeoplesRepublicOfChina // Mongolian (Traditional Mongolian) Mongolia (mn-MongMN) SubLangMongolianTraditionalMongolianMongolia // Nepali India (ne-IN) SubLangNepaliIndia // Nepali Nepal (ne-NP) SubLangNepaliNepal // Norwegian (Bokmal) Norway (nb-NO) SubLangNorwegianBokmalNorway // Norwegian (Nynorsk) Norway (nn-NO) SubLangNorwegianNynorskNorway // Occitan France (oc-FR) SubLangOccitanFrance // Odia India (or-IN) SubLangOdiaIndia // Oromo Ethiopia (om-ET) SubLangOromoEthiopia // Pashto Afghanistan (ps-AF) SubLangPashtoAfghanistan // Persian Iran (fa-IR) SubLangPersianIran // Polish Poland (pl-PL) SubLangPolishPoland // Portuguese Brazil (pt-BR) SubLangPortugueseBrazil // Portuguese Portugal (pt-PT) SubLangPortuguesePortugal // Pseudo Language Pseudo Locale For East Asian/Complex Script Localization Testing (qps-ploca) 
SubLangPseudoLanguagePseudoLocaleForEastAsianComplexScriptLocalizationTesting // Pseudo Language Pseudo Locale Used For Localization Testing (qps-ploc) SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTesting // Pseudo Language Pseudo Locale Used For Localization Testing Of Mirrored Locales (qps-plocm) SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTestingOfMirroredLocales // Punjabi (pa-Arab) SubLangPunjabi // Punjabi India (pa-IN) SubLangPunjabiIndia // Punjabi Islamic Republic Of Pakistan (pa-Arab-PK) SubLangPunjabiIslamicRepublicOfPakistan // Quechua Bolivia (quz-BO) SubLangQuechuaBolivia // Quechua Ecuador (quz-EC) SubLangQuechuaEcuador // Quechua Peru (quz-PE) SubLangQuechuaPeru // Romanian Moldova (ro-MD) SubLangRomanianMoldova // Romanian Romania (ro-RO) SubLangRomanianRomania // Romansh Switzerland (rm-CH) SubLangRomanshSwitzerland // Russian Moldova (ru-MD) SubLangRussianMoldova // Russian Russia (ru-RU) SubLangRussianRussia // Sakha Russia (sah-RU) SubLangSakhaRussia // Sami (Inari) Finland (smn-FI) SubLangSamiInariFinland // Sami (Lule) Norway (smj-NO) SubLangSamiLuleNorway // Sami (Lule) Sweden (smj-SE) SubLangSamiLuleSweden // Sami (Northern) Finland (se-FI) SubLangSamiNorthernFinland // Sami (Northern) Norway (se-NO) SubLangSamiNorthernNorway // Sami (Northern) Sweden (se-SE) SubLangSamiNorthernSweden // Sami (Skolt) Finland (sms-FI) SubLangSamiSkoltFinland // Sami (Southern) Norway (sma-NO) SubLangSamiSouthernNorway // Sami (Southern) Sweden (sma-SE) SubLangSamiSouthernSweden // Sanskrit India (sa-IN) SubLangSanskritIndia // Scottish Gaelic United Kingdom (gd-GB) SubLangScottishGaelicUnitedKingdom // Serbian (Cyrillic) (sr-Cyrl) SubLangSerbianCyrillic // Serbian (Cyrillic) Bosnia And Herzegovina (sr-Cyrl-BA) SubLangSerbianCyrillicBosniaAndHerzegovina // Serbian (Cyrillic) Montenegro (sr-Cyrl-ME) SubLangSerbianCyrillicMontenegro // Serbian (Cyrillic) Serbia (sr-Cyrl-RS) SubLangSerbianCyrillicSerbia // Serbian (Cyrillic) Serbia And Montenegro 
(Former) (sr-Cyrl-CS) SubLangSerbianCyrillicSerbiaAndMontenegroFormer // Serbian (Latin) (sr-Latn) SubLangSerbianLatin // Serbian (Latin) Bosnia And Herzegovina (sr-Latn-BA) SubLangSerbianLatinBosniaAndHerzegovina // Serbian (Latin) Montenegro (sr-Latn-ME) SubLangSerbianLatinMontenegro // Serbian (Latin) Serbia (sr-Latn-RS) SubLangSerbianLatinSerbia // Serbian (Latin) Serbia And Montenegro (Former) (sr-Latn-CS) SubLangSerbianLatinSerbiaAndMontenegroFormer // Sesotho Sa Leboa South Africa (nso-ZA) SubLangSesothoSaLeboaSouthAfrica // Setswana Botswana (tn-BW) SubLangSetswanaBotswana // Setswana South Africa (tn-ZA) SubLangSetswanaSouthAfrica // Sindhi (sd-Arab) SubLangSindhi // Sindhi Islamic Republic Of Pakistan (sd-Arab-PK) SubLangSindhiIslamicRepublicOfPakistan // Sinhala Sri Lanka (si-LK) SubLangSinhalaSriLanka // Slovak Slovakia (sk-SK) SubLangSlovakSlovakia // Slovenian Slovenia (sl-SI) SubLangSlovenianSlovenia // Somali Somalia (so-SO) SubLangSomaliSomalia // Sotho South Africa (st-ZA) SubLangSothoSouthAfrica // Spanish Argentina (es-AR) SubLangSpanishArgentina // Spanish Bolivarian Republic Of Venezuela (es-VE) SubLangSpanishBolivarianRepublicOfVenezuela // Spanish Bolivia (es-BO) SubLangSpanishBolivia // Spanish Chile (es-CL) SubLangSpanishChile // Spanish Colombia (es-CO) SubLangSpanishColombia // Spanish Costa Rica (es-CR) SubLangSpanishCostaRica // Spanish Cuba (es-CU) SubLangSpanishCuba // Spanish Dominican Republic (es-DO) SubLangSpanishDominicanRepublic // Spanish Ecuador (es-EC) SubLangSpanishEcuador // Spanish El Salvador (es-SV) SubLangSpanishElSalvador // Spanish Guatemala (es-GT) SubLangSpanishGuatemala // Spanish Honduras (es-HN) SubLangSpanishHonduras // Spanish Latin America (es-419) SubLangSpanishLatinAmerica // Spanish Mexico (es-MX) SubLangSpanishMexico // Spanish Nicaragua (es-NI) SubLangSpanishNicaragua // Spanish Panama (es-PA) SubLangSpanishPanama // Spanish Paraguay (es-PY) SubLangSpanishParaguay // Spanish Peru (es-PE) 
SubLangSpanishPeru // Spanish Puerto Rico (es-PR) SubLangSpanishPuertoRico // Spanish Spain (es-ES_tradnl) SubLangSpanishSpainTraditional // Spanish Spain (es-ES) SubLangSpanishSpain // Spanish United States (es-US) SubLangSpanishUnitedStates // Spanish Uruguay (es-UY) SubLangSpanishUruguay // Swedish Finland (sv-FI) SubLangSwedishFinland // Swedish Sweden (sv-SE) SubLangSwedishSweden // Syriac Syria (syr-SY) SubLangSyriacSyria // Tajik (Cyrillic) (tg-Cyrl) SubLangTajikCyrillic // Tajik (Cyrillic) Tajikistan (tg-Cyrl-TJ) SubLangTajikCyrillicTajikistan // Tamazight (Latin) (tzm-Latn) SubLangTamazightLatin // Tamazight (Latin) Algeria (tzm-Latn-DZ) SubLangTamazightLatinAlgeria // Tamil India (ta-IN) SubLangTamilIndia // Tamil Sri Lanka (ta-LK) SubLangTamilSriLanka // Tatar Russia (tt-RU) SubLangTatarRussia // Telugu India (te-IN) SubLangTeluguIndia // Thai Thailand (th-TH) SubLangThaiThailand // Tibetan People's Republic Of China (bo-CN) SubLangTibetanPeoplesRepublicOfChina // Tigrinya Eritrea (ti-ER) SubLangTigrinyaEritrea // Tigrinya Ethiopia (ti-ET) SubLangTigrinyaEthiopia // Tsonga South Africa (ts-ZA) SubLangTsongaSouthAfrica // Turkish Turkey (tr-TR) SubLangTurkishTurkey // Turkmen Turkmenistan (tk-TM) SubLangTurkmenTurkmenistan // Ukrainian Ukraine (uk-UA) SubLangUkrainianUkraine // Upper Sorbian Germany (hsb-DE) SubLangUpperSorbianGermany // Urdu India (ur-IN) SubLangUrduIndia // Urdu Islamic Republic Of Pakistan (ur-PK) SubLangUrduIslamicRepublicOfPakistan // Uyghur People's Republic Of China (ug-CN) SubLangUyghurPeoplesRepublicOfChina // Uzbek (Cyrillic) (uz-Cyrl) SubLangUzbekCyrillic // Uzbek (Cyrillic) Uzbekistan (uz-Cyrl-UZ) SubLangUzbekCyrillicUzbekistan // Uzbek (Latin) (uz-Latn) SubLangUzbekLatin // Uzbek (Latin) Uzbekistan (uz-Latn-UZ) SubLangUzbekLatinUzbekistan // Valencian Spain (ca-ESvalencia) SubLangValencianSpain // Venda South Africa (ve-ZA) SubLangVendaSouthAfrica // Vietnamese Vietnam (vi-VN) SubLangVietnameseVietnam // Welsh United Kingdom 
(cy-GB) SubLangWelshUnitedKingdom // Wolof Senegal (wo-SN) SubLangWolofSenegal // Xhosa South Africa (xh-ZA) SubLangXhosaSouthAfrica // Yi People's Republic Of China (ii-CN) SubLangYiPeoplesRepublicOfChina // Yiddish World (yi-001) SubLangYiddishWorld // Yoruba Nigeria (yo-NG) SubLangYorubaNigeria // Zulu South Africa (zu-ZA) SubLangZuluSouthAfrica ) const ( maxAllowedEntries = 0x1000 ) // Predefined Resource Types. const ( RTCursor ResourceType = iota + 1 // Hardware-dependent cursor resource. RTBitmap = 2 // Bitmap resource. RTIcon = 3 // Hardware-dependent icon resource. RTMenu = 4 // Menu resource. RTDialog = 5 // Dialog box. RTString = 6 // String-table entry. RTFontDir = 7 // Font directory resource. RTFont = 8 // Font resource. RTAccelerator = 9 // Accelerator table. RTRCdata = 10 // Application-defined resource (raw data). RTMessageTable = 11 // Message-table entry. RTGroupCursor = RTCursor + 11 // Hardware-independent cursor resource. RTGroupIcon = RTIcon + 11 // Hardware-independent icon resource. RTVersion = 16 // Version resource. RTDlgInclude = 17 // Dialog include entry. RTPlugPlay = 19 // Plug and Play resource. RTVxD = 20 // VXD. RTAniCursor = 21 // Animated cursor. RTAniIcon = 22 // Animated icon. RTHtml = 23 // HTML resource. RTManifest = 24 // Side-by-Side Assembly Manifest. ) // ImageResourceDirectory represents the IMAGE_RESOURCE_DIRECTORY. // This data structure should be considered the heading of a table because the // table actually consists of directory entries. type ImageResourceDirectory struct { // Resource flags. This field is reserved for future use. It is currently // set to zero. Characteristics uint32 `json:"characteristics"` // The time that the resource data was created by the resource compiler. TimeDateStamp uint32 `json:"time_date_stamp"` // The major version number, set by the user. MajorVersion uint16 `json:"major_version"` // The minor version number, set by the user. 
MinorVersion uint16 `json:"minor_version"` // The number of directory entries immediately following the table that use // strings to identify Type, Name, or Language entries (depending on the // level of the table). NumberOfNamedEntries uint16 `json:"number_of_named_entries"` // The number of directory entries immediately following the Name entries // that use numeric IDs for Type, Name, or Language entries. NumberOfIDEntries uint16 `json:"number_of_id_entries"` } // ImageResourceDirectoryEntry represents an entry in the resource directory // entries. type ImageResourceDirectoryEntry struct { // Name is used to identify either a type of resource, a resource name, or a // resource's language ID. Name uint32 `json:"name"` // OffsetToData is always used to point to a sibling in the tree, either a // directory node or a leaf node. OffsetToData uint32 `json:"offset_to_data"` } // ImageResourceDataEntry Each Resource Data entry describes an actual unit of // raw data in the Resource Data area. type ImageResourceDataEntry struct { // The address of a unit of resource data in the Resource Data area. OffsetToData uint32 `json:"offset_to_data"` // The size, in bytes, of the resource data that is pointed to by the Data // RVA field. Size uint32 `json:"size"` // The code page that is used to decode code point values within the // resource data. Typically, the code page would be the Unicode code page. CodePage uint32 `json:"code_page"` // Reserved, must be 0. Reserved uint32 `json:"reserved"` } // ResourceDirectory represents resource directory information. type ResourceDirectory struct { // IMAGE_RESOURCE_DIRECTORY structure. Struct ImageResourceDirectory `json:"struct"` // list of entries. Entries []ResourceDirectoryEntry `json:"entries"` } // ResourceDirectoryEntry represents a resource directory entry. type ResourceDirectoryEntry struct { // IMAGE_RESOURCE_DIRECTORY_ENTRY structure. 
Struct ImageResourceDirectoryEntry `json:"struct"` // If the resource is identified by name this attribute will contain the // name string. Empty string otherwise. If identified by id, the id is // available at `ID` field. Name string `json:"name"` // The resource identifier. ID uint32 `json:"id"` // IsResourceDir tell us if the entry is pointing to a resource directory or // a resource data entry. IsResourceDir bool `json:"is_resource_dir"` // If this entry has a lower level directory this attribute will point to // the ResourceDirData instance representing it. Directory ResourceDirectory `json:"directory"` // If this entry has no further lower directories and points to the actual // resource data, this attribute will reference the corresponding // ResourceDataEntry instance. Data ResourceDataEntry `json:"data"` } // ResourceDataEntry represents a resource data entry. type ResourceDataEntry struct { // IMAGE_RESOURCE_DATA_ENTRY structure. Struct ImageResourceDataEntry `json:"struct"` // Primary language ID. Lang ResourceLang `json:"lang"` // Sub language ID. 
SubLang ResourceSubLang `json:"sub_lang"` } func (pe *File) parseResourceDataEntry(rva uint32) ImageResourceDataEntry { dataEntry := ImageResourceDataEntry{} dataEntrySize := uint32(binary.Size(dataEntry)) offset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&dataEntry, offset, dataEntrySize) if err != nil { pe.logger.Warnf("Error parsing a resource directory data entry, the RVA is invalid") } return dataEntry } func (pe *File) parseResourceDirectoryEntry(rva uint32) *ImageResourceDirectoryEntry { resource := ImageResourceDirectoryEntry{} resourceSize := uint32(binary.Size(resource)) offset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&resource, offset, resourceSize) if err != nil { return nil } if resource == (ImageResourceDirectoryEntry{}) { return nil } // resource.NameOffset = resource.Name & 0x7FFFFFFF // resource.__pad = resource.Name & 0xFFFF0000 // resource.Id = resource.Name & 0x0000FFFF // resource.DataIsDirectory = (resource.OffsetToData & 0x80000000) >> 31 // resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFF return &resource } // Navigating the resource directory hierarchy is like navigating a hard disk. // There's a master directory (the root directory), which has subdirectories. // The subdirectories have subdirectories of their own that may point to the // raw resource data for things like dialog templates. func (pe *File) doParseResourceDirectory(rva, size, baseRVA, level uint32, dirs []uint32) (ResourceDirectory, error) { resourceDir := ImageResourceDirectory{} resourceDirSize := uint32(binary.Size(resourceDir)) offset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&resourceDir, offset, resourceDirSize) if err != nil { return ResourceDirectory{}, err } if baseRVA == 0 { baseRVA = rva } if len(dirs) == 0 { dirs = append(dirs, rva) } // Advance the RVA to the position immediately following the directory // table header and pointing to the first entry in the table. 
rva += resourceDirSize numberOfEntries := int(resourceDir.NumberOfNamedEntries + resourceDir.NumberOfIDEntries) var dirEntries []ResourceDirectoryEntry // Set a hard limit on the maximum reasonable number of entries. if numberOfEntries > maxAllowedEntries { pe.logger.Warnf(`Error parsing the resources directory. The directory contains %d entries`, numberOfEntries) return ResourceDirectory{}, nil } for i := 0; i < numberOfEntries; i++ { res := pe.parseResourceDirectoryEntry(rva) if res == nil { pe.logger.Warn("Error parsing a resource directory entry, the RVA is invalid") break } nameIsString := (res.Name & 0x80000000) >> 31 entryName := "" entryID := uint32(0) if nameIsString == 0 { entryID = res.Name } else { nameOffset := res.Name & 0x7FFFFFFF uStringOffset := pe.GetOffsetFromRva(baseRVA + nameOffset) maxLen, err := pe.ReadUint16(uStringOffset) if err != nil { break } entryName = pe.readUnicodeStringAtRVA(baseRVA+nameOffset+2, uint32(maxLen*2)) } // A directory entry points to either another resource directory or to // the data for an individual resource. When the directory entry points // to another resource directory, the high bit of the second DWORD in // the structure is set and the remaining 31 bits are an offset to the // resource directory. dataIsDirectory := (res.OffsetToData & 0x80000000) >> 31 // The offset is relative to the beginning of the resource section, // not an RVA. OffsetToDirectory := res.OffsetToData & 0x7FFFFFFF if dataIsDirectory > 0 { // One trick malware can do is to recursively reference // the next directory. This causes hilarity to ensue when // trying to parse everything correctly. // If the original RVA given to this function is equal to // the next one to parse, we assume that it's a trick. // Instead of raising a PEFormatError this would skip some // reasonable data so we just break. // 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample. 
if intInSlice(baseRVA+OffsetToDirectory, dirs) { break } level++ dirs = append(dirs, baseRVA+OffsetToDirectory) directoryEntry, _ := pe.doParseResourceDirectory( baseRVA+OffsetToDirectory, size-(rva-baseRVA), baseRVA, level, dirs) dirEntries = append(dirEntries, ResourceDirectoryEntry{ Struct: *res, Name: entryName, ID: entryID, IsResourceDir: true, Directory: directoryEntry}) } else { // data is entry dataEntryStruct := pe.parseResourceDataEntry(baseRVA + OffsetToDirectory) entryData := ResourceDataEntry{ Struct: dataEntryStruct, Lang: ResourceLang(res.Name & 0x3ff), SubLang: ResourceSubLang(res.Name >> 10), } dirEntries = append(dirEntries, ResourceDirectoryEntry{ Struct: *res, Name: entryName, ID: entryID, IsResourceDir: false, Data: entryData}) } rva += uint32(binary.Size(res)) } return ResourceDirectory{ Struct: resourceDir, Entries: dirEntries, }, nil } // The resource directory contains resources like dialog templates, icons, // and bitmaps. The resources are found in a section called .rsrc section. func (pe *File) parseResourceDirectory(rva, size uint32) error { var dirs []uint32 Resources, err := pe.doParseResourceDirectory(rva, size, 0, 0, dirs) if err != nil { return err } pe.Resources = Resources pe.HasResource = true return err } // String stringify the resource type. func (rt ResourceType) String() string { rsrcTypeMap := map[ResourceType]string{ RTCursor: "Cursor", RTBitmap: "Bitmap", RTIcon: "Icon", RTMenu: "Menu", RTDialog: "Dialog box", RTString: "String", RTFontDir: "Font directory", RTFont: "Font", RTAccelerator: "Accelerator", RTRCdata: "RC Data", RTMessageTable: "Message Table", RTGroupCursor: "Group Cursor", RTGroupIcon: "Group Icon", RTVersion: "Version", RTDlgInclude: "Dialog Include", RTPlugPlay: "Plug & Play", RTVxD: "VxD", RTAniCursor: "Animated Cursor", RTAniIcon: "Animated Icon", RTHtml: "HTML", RTManifest: "Manifest", } if val, ok := rsrcTypeMap[rt]; ok { return val } return "?" } // String stringify the resource language. 
func (lang ResourceLang) String() string { rsrcLangMap := map[ResourceLang]string{ LangAfrikaans: "Afrikaans (af)", LangAlbanian: "Albanian (sq)", LangAlsatian: "Alsatian (gsw)", LangAmharic: "Amharic (am)", LangArabic: "Arabic (ar)", LangArmenian: "Armenian (hy)", LangAssamese: "Assamese (as)", LangAzerbaijaniLatin: "Azerbaijani (Latin) (az)", LangBangla: "Bangla (bn)", LangBashkir: "Bashkir (ba)", LangBasque: "Basque (eu)", LangBelarusian: "Belarusian (be)", LangBosnianLatin: "Bosnian (Latin) (bs)", LangBreton: "Breton (br)", LangBulgarian: "Bulgarian (bg)", LangBurmese: "Burmese (my)", LangCatalan: "Catalan (ca)", LangCentralKurdish: "Central Kurdish (ku)", LangCherokee: "Cherokee (chr)", LangChineseSimplified: "Chinese (Simplified) (zh)", LangCorsican: "Corsican (co)", LangCroatian: "Croatian (hr)", LangCzech: "Czech (cs)", LangDanish: "Danish (da)", LangDari: "Dari (prs)", LangDivehi: "Divehi (dv)", LangDutch: "Dutch (nl)", LangEnglish: "English (en)", LangEstonian: "Estonian (et)", LangFaroese: "Faroese (fo)", LangFilipino: "Filipino (fil)", LangFinnish: "Finnish (fi)", LangFrench: "French (fr)", LangFrisian: "Frisian (fy)", LangFulah: "Fulah (ff)", LangFulahLatin: "Fulah (Latin) (ff-Latn)", LangGalician: "Galician (gl)", LangGeorgian: "Georgian (ka)", LangGerman: "German (de)", LangGreek: "Greek (el)", LangGreenlandic: "Greenlandic (kl)", LangGuarani: "Guarani (gn)", LangGujarati: "Gujarati (gu)", LangHausaLatin: "Hausa (Latin) (ha)", LangHawaiian: "Hawaiian (haw)", LangHebrew: "Hebrew (he)", LangHindi: "Hindi (hi)", LangHungarian: "Hungarian (hu)", LangIcelandic: "Icelandic (is)", LangIgbo: "Igbo (ig)", LangIndonesian: "Indonesian (id)", LangInuktitutLatin: "Inuktitut (Latin) (iu)", LangIrish: "Irish (ga)", LangItalian: "Italian (it)", LangJapanese: "Japanese (ja)", LangKannada: "Kannada (kn)", LangKashmiri: "Kashmiri (ks)", LangKazakh: "Kazakh (kk)", LangKhmer: "Khmer (km)", LangKiche: "K'iche (quc)", LangKinyarwanda: "Kinyarwanda (rw)", LangKiswahili: 
"Kiswahili (sw)", LangKonkani: "Konkani (kok)", LangKorean: "Korean (ko)", LangKyrgyz: "Kyrgyz (ky)", LangLao: "Lao (lo)", LangLatvian: "Latvian (lv)", LangLithuanian: "Lithuanian (lt)", LangLowerSorbian: "Lower Sorbian (dsb)", LangLuxembourgish: "Luxembourgish (lb)", LangMacedonian: "Macedonian (mk)", LangMalay: "Malay (ms)", LangMalayalam: "Malayalam (ml)", LangMaltese: "Maltese (mt)", LangMaori: "Maori (mi)", LangMapudungun: "Mapudungun (arn)", LangMarathi: "Marathi (mr)", LangMohawk: "Mohawk (moh)", LangMongolianCyrillic: "Mongolian (Cyrillic) (mn)", LangNepali: "Nepali (ne)", LangNorwegianBokmalNo: "Norwegian (Bokmal) (no)", LangNorwegianBokmal: "Norwegian (Bokmal) (nb)", LangNorwegianNynorsk: "Norwegian (Nynorsk) (nn)", LangOccitan: "Occitan (oc)", LangOdia: "Odia (or)", LangOromo: "Oromo (om)", LangPashto: "Pashto (ps)", LangPersian: "Persian (fa)", LangPolish: "Polish (pl)", LangPortuguese: "Portuguese (pt)", LangPunjabi: "Punjabi (pa)", LangQuechua: "Quechua (quz)", LangRomanian: "Romanian (ro)", LangRomansh: "Romansh (rm)", LangRussian: "Russian (ru)", LangSakha: "Sakha (sah)", LangSamiInari: "Sami (Inari) (smn)", LangSamiLule: "Sami (Lule) (smj)", LangSamiNorthern: "Sami (Northern) (se)", LangSamiSkolt: "Sami (Skolt) (sms)", LangSamiSouthern: "Sami (Southern) (sma)", LangSanskrit: "Sanskrit (sa)", LangScottishGaelic: "Scottish Gaelic (gd)", LangSerbianLatin: "Serbian (Latin) (sr)", LangSesothoSaLeboa: "Sesotho Sa Leboa (nso)", LangSetswana: "Setswana (tn)", LangSindhi: "Sindhi (sd)", LangSinhala: "Sinhala (si)", LangSlovak: "Slovak (sk)", LangSlovenian: "Slovenian (sl)", LangSomali: "Somali (so)", LangSotho: "Sotho (st)", LangSpanish: "Spanish (es)", LangSwedish: "Swedish (sv)", LangSyriac: "Syriac (syr)", LangTajikCyrillic: "Tajik (Cyrillic) (tg)", LangTamazightLatin: "Tamazight (Latin) (tzm)", LangTamil: "Tamil (ta)", LangTatar: "Tatar (tt)", LangTelugu: "Telugu (te)", LangThai: "Thai (th)", LangTibetan: "Tibetan (bo)", LangTigrinya: "Tigrinya (ti)", 
LangTsonga: "Tsonga (ts)", LangTurkish: "Turkish (tr)", LangTurkmen: "Turkmen (tk)", LangUkrainian: "Ukrainian (uk)", LangUpperSorbian: "Upper Sorbian (hsb)", LangUrdu: "Urdu (ur)", LangUyghur: "Uyghur (ug)", LangUzbekLatin: "Uzbek (Latin) (uz)", LangVenda: "Venda (ve)", LangVietnamese: "Vietnamese (vi)", LangWelsh: "Welsh (cy)", LangWolof: "Wolof (wo)", LangXhosa: "Xhosa (xh)", LangYi: "Yi (ii)", LangYoruba: "Yoruba (yo)", LangZulu: "Zulu (zu)", } if val, ok := rsrcLangMap[lang]; ok { return val } return "?" } // String stringify the resource sub language. func (subLang ResourceSubLang) String() string { rsrcSubLangMap := map[ResourceSubLang]string{ SubLangAfrikaansSouthAfrica: "Afrikaans South Africa (af-ZA)", SubLangAlbanianAlbania: "Albanian Albania (sq-AL)", SubLangAlsatianFrance: "Alsatian France (gsw-FR)", SubLangAmharicEthiopia: "Amharic Ethiopia (am-ET)", SubLangArabicAlgeria: "Arabic Algeria (ar-DZ)", SubLangArabicBahrain: "Arabic Bahrain (ar-BH)", SubLangArabicEgypt: "Arabic Egypt (ar-EG)", SubLangArabicIraq: "Arabic Iraq (ar-IQ)", SubLangArabicJordan: "Arabic Jordan (ar-JO)", SubLangArabicKuwait: "Arabic Kuwait (ar-KW)", SubLangArabicLebanon: "Arabic Lebanon (ar-LB)", SubLangArabicLibya: "Arabic Libya (ar-LY)", SubLangArabicMorocco: "Arabic Morocco (ar-MA)", SubLangArabicOman: "Arabic Oman (ar-OM)", SubLangArabicQatar: "Arabic Qatar (ar-QA)", SubLangArabicSaudiArabia: "Arabic Saudi Arabia (ar-SA)", SubLangArabicSyria: "Arabic Syria (ar-SY)", SubLangArabicTunisia: "Arabic Tunisia (ar-TN)", SubLangArabicUae: "Arabic U.a.e. 
(ar-AE)", SubLangArabicYemen: "Arabic Yemen (ar-YE)", SubLangArmenianArmenia: "Armenian Armenia (hy-AM)", SubLangAssameseIndia: "Assamese India (as-IN)", SubLangAzerbaijaniCyrillic: "Azerbaijani (Cyrillic) (az-Cyrl)", SubLangAzerbaijaniCyrillicAzerbaijan: "Azerbaijani (Cyrillic) Azerbaijan (az-Cyrl-AZ)", SubLangAzerbaijaniLatin: "Azerbaijani (Latin) (az-Latn)", SubLangAzerbaijaniLatinAzerbaijan: "Azerbaijani (Latin) Azerbaijan (az-Latn-AZ)", SubLangBanglaBangladesh: "Bangla Bangladesh (bn-BD)", SubLangBanglaIndia: "Bangla India (bn-IN)", SubLangBashkirRussia: "Bashkir Russia (ba-RU)", SubLangBasqueSpain: "Basque Spain (eu-ES)", SubLangBelarusianBelarus: "Belarusian Belarus (be-BY)", SubLangBosnianCyrillic: "Bosnian (Cyrillic) (bs-Cyrl)", SubLangBosnianCyrillicBosniaAndHerzegovina: "Bosnian (Cyrillic) Bosnia And Herzegovina (bs-Cyrl-BA)", SubLangBosnianLatin: "Bosnian (Latin) (bs-Latn)", SubLangBosnianLatinBosniaAndHerzegovina: "Bosnian (Latin) Bosnia And Herzegovina (bs-Latn-BA)", SubLangBretonFrance: "Breton France (br-FR)", SubLangBulgarianBulgaria: "Bulgarian Bulgaria (bg-BG)", SubLangBurmeseMyanmar: "Burmese Myanmar (my-MM)", SubLangCatalanSpain: "Catalan Spain (ca-ES)", SubLangCentralAtlasTamazightArabicMorocco: "Central Atlas Tamazight (Arabic) Morocco (tzm-ArabMA)", SubLangCentralKurdish: "Central Kurdish (ku-Arab)", SubLangCentralKurdishIraq: "Central Kurdish Iraq (ku-Arab-IQ)", SubLangCherokee: "Cherokee (chr-Cher)", SubLangCherokeeUnitedStates: "Cherokee United States (chr-Cher-US)", SubLangChineseSimplified: "Chinese (Simplified) (zh-Hans)", SubLangChineseSimplifiedPeoplesRepublicOfChina: "Chinese (Simplified) People's Republic Of China (zh-CN)", SubLangChineseSimplifiedSingapore: "Chinese (Simplified) Singapore (zh-SG)", SubLangChineseTraditional: "Chinese (Traditional) (zh-Hant)", SubLangChineseTraditionalHongKongSar: "Chinese (Traditional) Hong Kong S.a.r. (zh-HK)", SubLangChineseTraditionalMacaoSar: "Chinese (Traditional) Macao S.a.r. 
(zh-MO)", SubLangChineseTraditionalTaiwan: "Chinese (Traditional) Taiwan (zh-TW)", SubLangCorsicanFrance: "Corsican France (co-FR)", SubLangCroatianCroatia: "Croatian Croatia (hr-HR)", SubLangCroatianLatinBosniaAndHerzegovina: "Croatian (Latin) Bosnia And Herzegovina (hr-BA)", SubLangCzechCzechRepublic: "Czech Czech Republic (cs-CZ)", SubLangDanishDenmark: "Danish Denmark (da-DK)", SubLangDariAfghanistan: "Dari Afghanistan (prs-AF)", SubLangDivehiMaldives: "Divehi Maldives (dv-MV)", SubLangDutchBelgium: "Dutch Belgium (nl-BE)", SubLangDutchNetherlands: "Dutch Netherlands (nl-NL)", SubLangDzongkhaBhutan: "Dzongkha Bhutan (dz-BT)", SubLangEnglishAustralia: "English Australia (en-AU)", SubLangEnglishBelize: "English Belize (en-BZ)", SubLangEnglishCanada: "English Canada (en-CA)", SubLangEnglishCaribbean: "English Caribbean (en-029)", SubLangEnglishHongKong: "English Hong Kong (en-HK)", SubLangEnglishIndia: "English India (en-IN)", SubLangEnglishIreland: "English Ireland (en-IE)", SubLangEnglishJamaica: "English Jamaica (en-JM)", SubLangEnglishMalaysia: "English Malaysia (en-MY)", SubLangEnglishNewZealand: "English New Zealand (en-NZ)", SubLangEnglishRepublicOfThePhilippines: "English Republic Of The Philippines (en-PH)", SubLangEnglishSingapore: "English Singapore (en-SG)", SubLangEnglishSouthAfrica: "English South Africa (en-ZA)", SubLangEnglishTrinidadAndTobago: "English Trinidad And Tobago (en-TT)", SubLangEnglishUnitedArabEmirates: "English United Arab Emirates (en-AE)", SubLangEnglishUnitedKingdom: "English United Kingdom (en-GB)", SubLangEnglishUnitedStates: "English United States (en-US)", SubLangEnglishZimbabwe: "English Zimbabwe (en-ZW)", SubLangEstonianEstonia: "Estonian Estonia (et-EE)", SubLangFaroeseFaroeIslands: "Faroese Faroe Islands (fo-FO)", SubLangFilipinoPhilippines: "Filipino Philippines (fil-PH)", SubLangFinnishFinland: "Finnish Finland (fi-FI)", SubLangFrenchBelgium: "French Belgium (fr-BE)", SubLangFrenchCameroon: "French Cameroon (fr-CM)", 
SubLangFrenchCanada: "French Canada (fr-CA)", SubLangFrenchCaribbean: "French Caribbean (fr-029)", SubLangFrenchCongoDrc: "French Congo, Drc (fr-CD)", SubLangFrenchCôteDivoire: "French Côte D'ivoire (fr-CI)", SubLangFrenchFrance: "French France (fr-FR)", SubLangFrenchHaiti: "French Haiti (fr-HT)", SubLangFrenchLuxembourg: "French Luxembourg (fr-LU)", SubLangFrenchMali: "French Mali (fr-ML)", SubLangFrenchMorocco: "French Morocco (fr-MA)", SubLangFrenchPrincipalityOfMonaco: "French Principality Of Monaco (fr-MC)", SubLangFrenchReunion: "French Reunion (fr-RE)", SubLangFrenchSenegal: "French Senegal (fr-SN)", SubLangFrenchSwitzerland: "French Switzerland (fr-CH)", SubLangFrisianNetherlands: "Frisian Netherlands (fy-NL)", SubLangFulahNigeria: "Fulah Nigeria (ff-NG)", SubLangFulahLatinNigeria: "Fulah (Latin) Nigeria (ff-Latn-NG)", SubLangFulahSenegal: "Fulah Senegal (ff-Latn-SN)", SubLangGalicianSpain: "Galician Spain (gl-ES)", SubLangGeorgianGeorgia: "Georgian Georgia (ka-GE)", SubLangGermanAustria: "German Austria (de-AT)", SubLangGermanGermany: "German Germany (de-DE)", SubLangGermanLiechtenstein: "German Liechtenstein (de-LI)", SubLangGermanLuxembourg: "German Luxembourg (de-LU)", SubLangGermanSwitzerland: "German Switzerland (de-CH)", SubLangGreekGreece: "Greek Greece (el-GR)", SubLangGreenlandicGreenland: "Greenlandic Greenland (kl-GL)", SubLangGuaraniParaguay: "Guarani Paraguay (gn-PY)", SubLangGujaratiIndia: "Gujarati India (gu-IN)", SubLangHausaLatin: "Hausa (Latin) (ha-Latn)", SubLangHausaLatinNigeria: "Hausa (Latin) Nigeria (ha-Latn-NG)", SubLangHawaiianUnitedStates: "Hawaiian United States (haw-US)", SubLangHebrewIsrael: "Hebrew Israel (he-IL)", SubLangHindiIndia: "Hindi India (hi-IN)", SubLangHungarianHungary: "Hungarian Hungary (hu-HU)", SubLangIcelandicIceland: "Icelandic Iceland (is-IS)", SubLangIgboNigeria: "Igbo Nigeria (ig-NG)", SubLangIndonesianIndonesia: "Indonesian Indonesia (id-ID)", SubLangInuktitutLatin: "Inuktitut (Latin) (iu-Latn)", 
SubLangInuktitutLatinCanada: "Inuktitut (Latin) Canada (iu-Latn-CA)", SubLangInuktitutSyllabics: "Inuktitut (Syllabics) (iu-Cans)", SubLangInuktitutSyllabicsCanada: "Inuktitut (Syllabics) Canada (iu-Cans-CA)", SubLangIrishIreland: "Irish Ireland (ga-IE)", SubLangItalianItaly: "Italian Italy (it-IT)", SubLangItalianSwitzerland: "Italian Switzerland (it-CH)", SubLangJapaneseJapan: "Japanese Japan (ja-JP)", SubLangKannadaIndia: "Kannada India (kn-IN)", SubLangKanuriLatinNigeria: "Kanuri (Latin) Nigeria (kr-Latn-NG)", SubLangKashmiriPersoArabic: "Kashmiri Perso-Arabic (ks-Arab)", SubLangKashmiriDevanagariIndia: "Kashmiri (Devanagari) India (ks-Deva-IN)", SubLangKazakhKazakhstan: "Kazakh Kazakhstan (kk-KZ)", SubLangKhmerCambodia: "Khmer Cambodia (km-KH)", SubLangKicheGuatemala: "K'iche Guatemala (quc-Latn-GT)", SubLangKinyarwandaRwanda: "Kinyarwanda Rwanda (rw-RW)", SubLangKiswahiliKenya: "Kiswahili Kenya (sw-KE)", SubLangKonkaniIndia: "Konkani India (kok-IN)", SubLangKoreanKorea: "Korean Korea (ko-KR)", SubLangKyrgyzKyrgyzstan: "Kyrgyz Kyrgyzstan (ky-KG)", SubLangLaoLaoPdr: "Lao Lao P.d.r. 
(lo-LA)", SubLangLatinVaticanCity: "Latin Vatican City (la-VA)", SubLangLatvianLatvia: "Latvian Latvia (lv-LV)", SubLangLithuanianLithuania: "Lithuanian Lithuania (lt-LT)", SubLangLowerSorbianGermany: "Lower Sorbian Germany (dsb-DE)", SubLangLuxembourgishLuxembourg: "Luxembourgish Luxembourg (lb-LU)", SubLangMacedonianNorthMacedonia: "Macedonian North Macedonia (mk-MK)", SubLangMalayBruneiDarussalam: "Malay Brunei Darussalam (ms-BN)", SubLangMalayMalaysia: "Malay Malaysia (ms-MY)", SubLangMalayalamIndia: "Malayalam India (ml-IN)", SubLangMalteseMalta: "Maltese Malta (mt-MT)", SubLangMaoriNewZealand: "Maori New Zealand (mi-NZ)", SubLangMapudungunChile: "Mapudungun Chile (arn-CL)", SubLangMarathiIndia: "Marathi India (mr-IN)", SubLangMohawkCanada: "Mohawk Canada (moh-CA)", SubLangMongolianCyrillic: "Mongolian (Cyrillic) (mn-Cyrl)", SubLangMongolianCyrillicMongolia: "Mongolian (Cyrillic) Mongolia (mn-MN)", SubLangMongolianTraditionalMongolian: "Mongolian (Traditional Mongolian) (mn-Mong)", SubLangMongolianTraditionalMongolianPeoplesRepublicOfChina: "Mongolian (Traditional Mongolian) People's Republic Of China (mn-MongCN)", SubLangMongolianTraditionalMongolianMongolia: "Mongolian (Traditional Mongolian) Mongolia (mn-MongMN)", SubLangNepaliIndia: "Nepali India (ne-IN)", SubLangNepaliNepal: "Nepali Nepal (ne-NP)", SubLangNorwegianBokmalNorway: "Norwegian (Bokmal) Norway (nb-NO)", SubLangNorwegianNynorskNorway: "Norwegian (Nynorsk) Norway (nn-NO)", SubLangOccitanFrance: "Occitan France (oc-FR)", SubLangOdiaIndia: "Odia India (or-IN)", SubLangOromoEthiopia: "Oromo Ethiopia (om-ET)", SubLangPashtoAfghanistan: "Pashto Afghanistan (ps-AF)", SubLangPersianIran: "Persian Iran (fa-IR)", SubLangPolishPoland: "Polish Poland (pl-PL)", SubLangPortugueseBrazil: "Portuguese Brazil (pt-BR)", SubLangPortuguesePortugal: "Portuguese Portugal (pt-PT)", SubLangPseudoLanguagePseudoLocaleForEastAsianComplexScriptLocalizationTesting: "Pseudo Language Pseudo Locale For East Asian/Complex Script 
Localization Testing (qps-ploca)", SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTesting: "Pseudo Language Pseudo Locale Used For Localization Testing (qps-ploc)", SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTestingOfMirroredLocales: "Pseudo Language Pseudo Locale Used For Localization Testing Of Mirrored Locales (qps-plocm)", SubLangPunjabi: "Punjabi (pa-Arab)", SubLangPunjabiIndia: "Punjabi India (pa-IN)", SubLangPunjabiIslamicRepublicOfPakistan: "Punjabi Islamic Republic Of Pakistan (pa-Arab-PK)", SubLangQuechuaBolivia: "Quechua Bolivia (quz-BO)", SubLangQuechuaEcuador: "Quechua Ecuador (quz-EC)", SubLangQuechuaPeru: "Quechua Peru (quz-PE)", SubLangRomanianMoldova: "Romanian Moldova (ro-MD)", SubLangRomanianRomania: "Romanian Romania (ro-RO)", SubLangRomanshSwitzerland: "Romansh Switzerland (rm-CH)", SubLangRussianMoldova: "Russian Moldova (ru-MD)", SubLangRussianRussia: "Russian Russia (ru-RU)", SubLangSakhaRussia: "Sakha Russia (sah-RU)", SubLangSamiInariFinland: "Sami (Inari) Finland (smn-FI)", SubLangSamiLuleNorway: "Sami (Lule) Norway (smj-NO)", SubLangSamiLuleSweden: "Sami (Lule) Sweden (smj-SE)", SubLangSamiNorthernFinland: "Sami (Northern) Finland (se-FI)", SubLangSamiNorthernNorway: "Sami (Northern) Norway (se-NO)", SubLangSamiNorthernSweden: "Sami (Northern) Sweden (se-SE)", SubLangSamiSkoltFinland: "Sami (Skolt) Finland (sms-FI)", SubLangSamiSouthernNorway: "Sami (Southern) Norway (sma-NO)", SubLangSamiSouthernSweden: "Sami (Southern) Sweden (sma-SE)", SubLangSanskritIndia: "Sanskrit India (sa-IN)", SubLangScottishGaelicUnitedKingdom: "Scottish Gaelic United Kingdom (gd-GB)", SubLangSerbianCyrillic: "Serbian (Cyrillic) (sr-Cyrl)", SubLangSerbianCyrillicBosniaAndHerzegovina: "Serbian (Cyrillic) Bosnia And Herzegovina (sr-Cyrl-BA)", SubLangSerbianCyrillicMontenegro: "Serbian (Cyrillic) Montenegro (sr-Cyrl-ME)", SubLangSerbianCyrillicSerbia: "Serbian (Cyrillic) Serbia (sr-Cyrl-RS)", SubLangSerbianCyrillicSerbiaAndMontenegroFormer: "Serbian 
(Cyrillic) Serbia And Montenegro (Former) (sr-Cyrl-CS)", SubLangSerbianLatin: "Serbian (Latin) (sr-Latn)", SubLangSerbianLatinBosniaAndHerzegovina: "Serbian (Latin) Bosnia And Herzegovina (sr-Latn-BA)", SubLangSerbianLatinMontenegro: "Serbian (Latin) Montenegro (sr-Latn-ME)", SubLangSerbianLatinSerbia: "Serbian (Latin) Serbia (sr-Latn-RS)", SubLangSerbianLatinSerbiaAndMontenegroFormer: "Serbian (Latin) Serbia And Montenegro (Former) (sr-Latn-CS)", SubLangSesothoSaLeboaSouthAfrica: "Sesotho Sa Leboa South Africa (nso-ZA)", SubLangSetswanaBotswana: "Setswana Botswana (tn-BW)", SubLangSetswanaSouthAfrica: "Setswana South Africa (tn-ZA)", SubLangSindhi: "Sindhi (sd-Arab)", SubLangSindhiIslamicRepublicOfPakistan: "Sindhi Islamic Republic Of Pakistan (sd-Arab-PK)", SubLangSinhalaSriLanka: "Sinhala Sri Lanka (si-LK)", SubLangSlovakSlovakia: "Slovak Slovakia (sk-SK)", SubLangSlovenianSlovenia: "Slovenian Slovenia (sl-SI)", SubLangSomaliSomalia: "Somali Somalia (so-SO)", SubLangSothoSouthAfrica: "Sotho South Africa (st-ZA)", SubLangSpanishArgentina: "Spanish Argentina (es-AR)", SubLangSpanishBolivarianRepublicOfVenezuela: "Spanish Bolivarian Republic Of Venezuela (es-VE)", SubLangSpanishBolivia: "Spanish Bolivia (es-BO)", SubLangSpanishChile: "Spanish Chile (es-CL)", SubLangSpanishColombia: "Spanish Colombia (es-CO)", SubLangSpanishCostaRica: "Spanish Costa Rica (es-CR)", SubLangSpanishCuba: "Spanish Cuba (es-CU)", SubLangSpanishDominicanRepublic: "Spanish Dominican Republic (es-DO)", SubLangSpanishEcuador: "Spanish Ecuador (es-EC)", SubLangSpanishElSalvador: "Spanish El Salvador (es-SV)", SubLangSpanishGuatemala: "Spanish Guatemala (es-GT)", SubLangSpanishHonduras: "Spanish Honduras (es-HN)", SubLangSpanishLatinAmerica: "Spanish Latin America (es-419)", SubLangSpanishMexico: "Spanish Mexico (es-MX)", SubLangSpanishNicaragua: "Spanish Nicaragua (es-NI)", SubLangSpanishPanama: "Spanish Panama (es-PA)", SubLangSpanishParaguay: "Spanish Paraguay (es-PY)", SubLangSpanishPeru: 
"Spanish Peru (es-PE)", SubLangSpanishPuertoRico: "Spanish Puerto Rico (es-PR)", SubLangSpanishSpainTraditional: "Spanish Spain (es-ES_tradnl)", SubLangSpanishSpain: "Spanish Spain (es-ES)", SubLangSpanishUnitedStates: "Spanish United States (es-US)", SubLangSpanishUruguay: "Spanish Uruguay (es-UY)", SubLangSwedishFinland: "Swedish Finland (sv-FI)", SubLangSwedishSweden: "Swedish Sweden (sv-SE)", SubLangSyriacSyria: "Syriac Syria (syr-SY)", SubLangTajikCyrillic: "Tajik (Cyrillic) (tg-Cyrl)", SubLangTajikCyrillicTajikistan: "Tajik (Cyrillic) Tajikistan (tg-Cyrl-TJ)", SubLangTamazightLatin: "Tamazight (Latin) (tzm-Latn)", SubLangTamazightLatinAlgeria: "Tamazight (Latin) Algeria (tzm-Latn-DZ)", SubLangTamilIndia: "Tamil India (ta-IN)", SubLangTamilSriLanka: "Tamil Sri Lanka (ta-LK)", SubLangTatarRussia: "Tatar Russia (tt-RU)", SubLangTeluguIndia: "Telugu India (te-IN)", SubLangThaiThailand: "Thai Thailand (th-TH)", SubLangTibetanPeoplesRepublicOfChina: "Tibetan People's Republic Of China (bo-CN)", SubLangTigrinyaEritrea: "Tigrinya Eritrea (ti-ER)", SubLangTigrinyaEthiopia: "Tigrinya Ethiopia (ti-ET)", SubLangTsongaSouthAfrica: "Tsonga South Africa (ts-ZA)", SubLangTurkishTurkey: "Turkish Turkey (tr-TR)", SubLangTurkmenTurkmenistan: "Turkmen Turkmenistan (tk-TM)", SubLangUkrainianUkraine: "Ukrainian Ukraine (uk-UA)", SubLangUpperSorbianGermany: "Upper Sorbian Germany (hsb-DE)", SubLangUrduIndia: "Urdu India (ur-IN)", SubLangUrduIslamicRepublicOfPakistan: "Urdu Islamic Republic Of Pakistan (ur-PK)", SubLangUyghurPeoplesRepublicOfChina: "Uyghur People's Republic Of China (ug-CN)", SubLangUzbekCyrillic: "Uzbek (Cyrillic) (uz-Cyrl)", SubLangUzbekCyrillicUzbekistan: "Uzbek (Cyrillic) Uzbekistan (uz-Cyrl-UZ)", SubLangUzbekLatin: "Uzbek (Latin) (uz-Latn)", SubLangUzbekLatinUzbekistan: "Uzbek (Latin) Uzbekistan (uz-Latn-UZ)", SubLangValencianSpain: "Valencian Spain (ca-ESvalencia)", SubLangVendaSouthAfrica: "Venda South Africa (ve-ZA)", SubLangVietnameseVietnam: "Vietnamese 
Vietnam (vi-VN)", SubLangWelshUnitedKingdom: "Welsh United Kingdom (cy-GB)", SubLangWolofSenegal: "Wolof Senegal (wo-SN)", SubLangXhosaSouthAfrica: "Xhosa South Africa (xh-ZA)", SubLangYiPeoplesRepublicOfChina: "Yi People's Republic Of China (ii-CN)", SubLangYiddishWorld: "Yiddish World (yi-001)", SubLangYorubaNigeria: "Yoruba Nigeria (yo-NG)", SubLangZuluSouthAfrica: "Zulu South Africa (zu-ZA)", } if val, ok := rsrcSubLangMap[subLang]; ok { return val } return "?" } // PrettyResourceLang prettifies the resource lang and sub lang. func PrettyResourceLang(lang ResourceLang, subLang int) string { m := map[ResourceLang]map[int]ResourceSubLang{ LangAfrikaans: { 0x1: SubLangAfrikaansSouthAfrica, }, LangAlbanian: { 0x1: SubLangAlbanianAlbania, }, LangAlsatian: { 0x1: SubLangAlsatianFrance, }, LangAmharic: { 0x1: SubLangAmharicEthiopia, }, LangArabic: { 0x5: SubLangArabicAlgeria, 0xf: SubLangArabicBahrain, 0x3: SubLangArabicEgypt, 0x2: SubLangArabicIraq, 0xb: SubLangArabicJordan, 0xd: SubLangArabicKuwait, 0xc: SubLangArabicLebanon, 0x4: SubLangArabicLibya, 0x6: SubLangArabicMorocco, 0x8: SubLangArabicOman, 0x10: SubLangArabicQatar, 0x1: SubLangArabicSaudiArabia, 0xa: SubLangArabicSyria, 0x7: SubLangArabicTunisia, 0xe: SubLangArabicUae, 0x9: SubLangArabicYemen, }, LangArmenian: { 0x1: SubLangArmenianArmenia, }, LangAssamese: { 0x1: SubLangAssameseIndia, 0x1d: SubLangAzerbaijaniCyrillic, 0x2: SubLangAzerbaijaniCyrillicAzerbaijan, }, LangAzerbaijaniLatin: { 0x1e: SubLangAzerbaijaniLatin, 0x1: SubLangAzerbaijaniLatinAzerbaijan, }, LangBangla: { 0x2: SubLangBanglaBangladesh, 0x1: SubLangBanglaIndia, }, LangBashkir: { 0x1: SubLangBashkirRussia, }, LangBasque: { 0x1: SubLangBasqueSpain, }, LangBelarusian: { 0x1: SubLangBelarusianBelarus, 0x19: SubLangBosnianCyrillic, 0x8: SubLangBosnianCyrillicBosniaAndHerzegovina, 0x1a: SubLangBosnianLatin, }, LangBosnianLatin: { 0x5: SubLangBosnianLatinBosniaAndHerzegovina, }, LangBreton: { 0x1: SubLangBretonFrance, }, LangBulgarian: { 0x1: 
SubLangBulgarianBulgaria, }, LangBurmese: { 0x1: SubLangBurmeseMyanmar, }, LangCatalan: { 0x1: SubLangCatalanSpain, }, LangCentralKurdish: { 0x1f: SubLangCentralKurdish, 0x1: SubLangCentralKurdishIraq, }, LangCherokee: { 0x1f: SubLangCherokee, 0x1: SubLangCherokeeUnitedStates, 0x0: SubLangChineseSimplified, }, LangChineseSimplified: { 0x2: SubLangChineseSimplifiedPeoplesRepublicOfChina, 0x4: SubLangChineseSimplifiedSingapore, 0x1f: SubLangChineseTraditional, 0x3: SubLangChineseTraditionalHongKongSar, 0x5: SubLangChineseTraditionalMacaoSar, 0x1: SubLangChineseTraditionalTaiwan, }, LangCorsican: { 0x1: SubLangCorsicanFrance, }, LangCroatian: { 0x1: SubLangCroatianCroatia, 0x4: SubLangCroatianLatinBosniaAndHerzegovina, }, LangCzech: { 0x1: SubLangCzechCzechRepublic, }, LangDanish: { 0x1: SubLangDanishDenmark, }, LangDari: { 0x1: SubLangDariAfghanistan, }, LangDivehi: { 0x1: SubLangDivehiMaldives, }, LangDutch: { 0x2: SubLangDutchBelgium, 0x1: SubLangDutchNetherlands, 0x3: SubLangDzongkhaBhutan, }, LangEnglish: { 0x3: SubLangEnglishAustralia, 0xa: SubLangEnglishBelize, 0x4: SubLangEnglishCanada, 0x9: SubLangEnglishCaribbean, 0xf: SubLangEnglishHongKong, 0x10: SubLangEnglishIndia, 0x6: SubLangEnglishIreland, 0x8: SubLangEnglishJamaica, 0x11: SubLangEnglishMalaysia, 0x5: SubLangEnglishNewZealand, 0xd: SubLangEnglishRepublicOfThePhilippines, 0x12: SubLangEnglishSingapore, 0x7: SubLangEnglishSouthAfrica, 0xb: SubLangEnglishTrinidadAndTobago, 0x13: SubLangEnglishUnitedArabEmirates, 0x2: SubLangEnglishUnitedKingdom, 0x1: SubLangEnglishUnitedStates, 0xc: SubLangEnglishZimbabwe, }, LangEstonian: { 0x1: SubLangEstonianEstonia, }, LangFaroese: { 0x1: SubLangFaroeseFaroeIslands, }, LangFilipino: { 0x1: SubLangFilipinoPhilippines, }, LangFinnish: { 0x1: SubLangFinnishFinland, }, LangFrench: { 0x2: SubLangFrenchBelgium, 0xb: SubLangFrenchCameroon, 0x3: SubLangFrenchCanada, 0x7: SubLangFrenchCaribbean, 0x9: SubLangFrenchCongoDrc, 0xc: SubLangFrenchCôteDivoire, 0x1: 
SubLangFrenchFrance, 0xf: SubLangFrenchHaiti, 0x5: SubLangFrenchLuxembourg, 0xd: SubLangFrenchMali, 0xe: SubLangFrenchMorocco, 0x6: SubLangFrenchPrincipalityOfMonaco, 0x8: SubLangFrenchReunion, 0xa: SubLangFrenchSenegal, 0x4: SubLangFrenchSwitzerland, }, LangFrisian: { 0x1: SubLangFrisianNetherlands, }, LangFulah: { 0x1: SubLangFulahNigeria, 0x2: SubLangFulahSenegal, }, LangFulahLatin: { 0x1: SubLangFulahLatinNigeria, }, LangGalician: { 0x1: SubLangGalicianSpain, }, LangGeorgian: { 0x1: SubLangGeorgianGeorgia, }, LangGerman: { 0x3: SubLangGermanAustria, 0x1: SubLangGermanGermany, 0x5: SubLangGermanLiechtenstein, 0x4: SubLangGermanLuxembourg, 0x2: SubLangGermanSwitzerland, }, LangGreek: { 0x1: SubLangGreekGreece, }, LangGreenlandic: { 0x1: SubLangGreenlandicGreenland, }, LangGuarani: { 0x1: SubLangGuaraniParaguay, }, LangGujarati: { 0x1: SubLangGujaratiIndia, }, LangHausaLatin: { 0x1f: SubLangHausaLatin, 0x1: SubLangHausaLatinNigeria, }, LangHawaiian: { 0x1: SubLangHawaiianUnitedStates, }, LangHebrew: { 0x1: SubLangHebrewIsrael, }, LangHindi: { 0x1: SubLangHindiIndia, }, LangHungarian: { 0x1: SubLangHungarianHungary, }, LangIcelandic: { 0x1: SubLangIcelandicIceland, }, LangIgbo: { 0x1: SubLangIgboNigeria, }, LangIndonesian: { 0x1: SubLangIndonesianIndonesia, }, LangInuktitutLatin: { 0x1f: SubLangInuktitutLatin, 0x2: SubLangInuktitutLatinCanada, 0x1e: SubLangInuktitutSyllabics, 0x1: SubLangInuktitutSyllabicsCanada, }, LangIrish: { 0x2: SubLangIrishIreland, }, LangItalian: { 0x1: SubLangItalianItaly, 0x2: SubLangItalianSwitzerland, }, LangJapanese: { 0x1: SubLangJapaneseJapan, }, LangKannada: { 0x1: SubLangKannadaIndia, }, LangKashmiri: { 0x1: SubLangKashmiriPersoArabic, 0x2: SubLangKashmiriDevanagariIndia, }, LangKazakh: { 0x1: SubLangKazakhKazakhstan, }, LangKhmer: { 0x1: SubLangKhmerCambodia, }, LangKiche: { 0x1: SubLangKicheGuatemala, }, LangKinyarwanda: { 0x1: SubLangKinyarwandaRwanda, }, LangKiswahili: { 0x1: SubLangKiswahiliKenya, }, LangKonkani: { 0x1: 
SubLangKonkaniIndia, }, LangKorean: { 0x1: SubLangKoreanKorea, }, LangKyrgyz: { 0x1: SubLangKyrgyzKyrgyzstan, }, LangLao: { 0x1: SubLangLaoLaoPdr, }, LangLatvian: { 0x1: SubLangLatvianLatvia, }, LangLithuanian: { 0x1: SubLangLithuanianLithuania, }, LangLowerSorbian: { 0x2: SubLangLowerSorbianGermany, }, LangLuxembourgish: { 0x1: SubLangLuxembourgishLuxembourg, }, LangMacedonian: { 0x1: SubLangMacedonianNorthMacedonia, }, LangMalay: { 0x2: SubLangMalayBruneiDarussalam, 0x1: SubLangMalayMalaysia, }, LangMalayalam: { 0x1: SubLangMalayalamIndia, }, LangMaltese: { 0x1: SubLangMalteseMalta, }, LangMaori: { 0x1: SubLangMaoriNewZealand, }, LangMapudungun: { 0x1: SubLangMapudungunChile, }, LangMarathi: { 0x1: SubLangMarathiIndia, }, LangMohawk: { 0x1: SubLangMohawkCanada, }, LangMongolianCyrillic: { 0x1e: SubLangMongolianCyrillic, 0x1: SubLangMongolianCyrillicMongolia, 0x1f: SubLangMongolianTraditionalMongolian, 0x2: SubLangMongolianTraditionalMongolianPeoplesRepublicOfChina, 0x3: SubLangMongolianTraditionalMongolianMongolia, }, LangNepali: { 0x2: SubLangNepaliIndia, 0x1: SubLangNepaliNepal, }, LangNorwegianBokmalNo: {}, LangNorwegianBokmal: { 0x1: SubLangNorwegianBokmalNorway, }, LangNorwegianNynorsk: { 0x2: SubLangNorwegianNynorskNorway, }, LangOccitan: { 0x1: SubLangOccitanFrance, }, LangOdia: { 0x1: SubLangOdiaIndia, }, LangOromo: { 0x1: SubLangOromoEthiopia, }, LangPashto: { 0x1: SubLangPashtoAfghanistan, }, LangPersian: { 0x1: SubLangPersianIran, }, LangPolish: { 0x1: SubLangPolishPoland, }, LangPortuguese: { 0x1: SubLangPortugueseBrazil, 0x2: SubLangPortuguesePortugal, }, LangPunjabi: { 0x1f: SubLangPunjabi, 0x1: SubLangPunjabiIndia, 0x2: SubLangPunjabiIslamicRepublicOfPakistan, }, LangQuechua: { 0x1: SubLangQuechuaBolivia, 0x2: SubLangQuechuaEcuador, 0x3: SubLangQuechuaPeru, }, LangRomanian: { 0x2: SubLangRomanianMoldova, 0x1: SubLangRomanianRomania, }, LangRomansh: { 0x1: SubLangRomanshSwitzerland, }, LangRussian: { 0x2: SubLangRussianMoldova, 0x1: 
SubLangRussianRussia, }, LangSakha: { 0x1: SubLangSakhaRussia, }, LangSamiInari: { 0x9: SubLangSamiInariFinland, }, LangSamiLule: { 0x4: SubLangSamiLuleNorway, 0x5: SubLangSamiLuleSweden, }, LangSamiNorthern: { 0x3: SubLangSamiNorthernFinland, 0x1: SubLangSamiNorthernNorway, 0x2: SubLangSamiNorthernSweden, }, LangSamiSkolt: { 0x8: SubLangSamiSkoltFinland, }, LangSamiSouthern: { 0x6: SubLangSamiSouthernNorway, 0x7: SubLangSamiSouthernSweden, }, LangSanskrit: { 0x1: SubLangSanskritIndia, }, LangScottishGaelic: { 0x1: SubLangScottishGaelicUnitedKingdom, 0x1b: SubLangSerbianCyrillic, 0x7: SubLangSerbianCyrillicBosniaAndHerzegovina, 0xc: SubLangSerbianCyrillicMontenegro, 0xa: SubLangSerbianCyrillicSerbia, 0x3: SubLangSerbianCyrillicSerbiaAndMontenegroFormer, 0x1c: SubLangSerbianLatin, }, LangSerbianLatin: { 0x6: SubLangSerbianLatinBosniaAndHerzegovina, 0xb: SubLangSerbianLatinMontenegro, 0x9: SubLangSerbianLatinSerbia, 0x2: SubLangSerbianLatinSerbiaAndMontenegroFormer, }, LangSesothoSaLeboa: { 0x1: SubLangSesothoSaLeboaSouthAfrica, }, LangSetswana: { 0x2: SubLangSetswanaBotswana, 0x1: SubLangSetswanaSouthAfrica, }, LangSindhi: { 0x1f: SubLangSindhi, 0x2: SubLangSindhiIslamicRepublicOfPakistan, }, LangSinhala: { 0x1: SubLangSinhalaSriLanka, }, LangSlovak: { 0x1: SubLangSlovakSlovakia, }, LangSlovenian: { 0x1: SubLangSlovenianSlovenia, }, LangSomali: { 0x1: SubLangSomaliSomalia, }, LangSotho: { 0x1: SubLangSothoSouthAfrica, }, LangSpanish: { 0xb: SubLangSpanishArgentina, 0x8: SubLangSpanishBolivarianRepublicOfVenezuela, 0x10: SubLangSpanishBolivia, 0xd: SubLangSpanishChile, 0x9: SubLangSpanishColombia, 0x5: SubLangSpanishCostaRica, 0x17: SubLangSpanishCuba, 0x7: SubLangSpanishDominicanRepublic, 0xc: SubLangSpanishEcuador, 0x11: SubLangSpanishElSalvador, 0x4: SubLangSpanishGuatemala, 0x12: SubLangSpanishHonduras, 0x16: SubLangSpanishLatinAmerica, 0x2: SubLangSpanishMexico, 0x13: SubLangSpanishNicaragua, 0x6: SubLangSpanishPanama, 0xf: SubLangSpanishParaguay, 0xa: 
SubLangSpanishPeru, 0x14: SubLangSpanishPuertoRico, 0x1: SubLangSpanishSpain, 0x3: SubLangSpanishSpain, 0x15: SubLangSpanishUnitedStates, 0xe: SubLangSpanishUruguay, }, LangSwedish: { 0x2: SubLangSwedishFinland, 0x1: SubLangSwedishSweden, }, LangSyriac: { 0x1: SubLangSyriacSyria, }, LangTajikCyrillic: { 0x1f: SubLangTajikCyrillic, 0x1: SubLangTajikCyrillicTajikistan, }, LangTamazightLatin: { 0x1f: SubLangTamazightLatin, 0x2: SubLangTamazightLatinAlgeria, }, LangTamil: { 0x1: SubLangTamilIndia, 0x2: SubLangTamilSriLanka, }, LangTatar: { 0x1: SubLangTatarRussia, }, LangTelugu: { 0x1: SubLangTeluguIndia, }, LangThai: { 0x1: SubLangThaiThailand, }, LangTibetan: { 0x1: SubLangTibetanPeoplesRepublicOfChina, }, LangTigrinya: { 0x2: SubLangTigrinyaEritrea, 0x1: SubLangTigrinyaEthiopia, }, LangTsonga: { 0x1: SubLangTsongaSouthAfrica, }, LangTurkish: { 0x1: SubLangTurkishTurkey, }, LangTurkmen: { 0x1: SubLangTurkmenTurkmenistan, }, LangUkrainian: { 0x1: SubLangUkrainianUkraine, }, LangUpperSorbian: { 0x1: SubLangUpperSorbianGermany, }, LangUrdu: { 0x2: SubLangUrduIndia, 0x1: SubLangUrduIslamicRepublicOfPakistan, }, LangUyghur: { 0x1: SubLangUyghurPeoplesRepublicOfChina, 0x1e: SubLangUzbekCyrillic, 0x2: SubLangUzbekCyrillicUzbekistan, }, LangUzbekLatin: { 0x1f: SubLangUzbekLatin, 0x1: SubLangUzbekLatinUzbekistan, 0x2: SubLangValencianSpain, }, LangVenda: { 0x1: SubLangVendaSouthAfrica, }, LangVietnamese: { 0x1: SubLangVietnameseVietnam, }, LangWelsh: { 0x1: SubLangWelshUnitedKingdom, }, LangWolof: { 0x1: SubLangWolofSenegal, }, LangXhosa: { 0x1: SubLangXhosaSouthAfrica, }, LangYi: { 0x1: SubLangYiPeoplesRepublicOfChina, }, LangYoruba: { 0x1: SubLangYorubaNigeria, }, LangZulu: { 0x1: SubLangZuluSouthAfrica, }, } if val, ok := m[lang][subLang]; ok { return val.String() } return "?" } ================================================ FILE: resource_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. 
// Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "reflect" "testing" ) type TestRsrcDir struct { Level1ImgRsrcDir ImageResourceDirectory Level2Index int Level2ImgRsrcDir ImageResourceDirectory Level3Index int Level3ImgRsrcDir ImageResourceDirectory Level3RsrcDirEntry ResourceDirectoryEntry Level4Index int Level4RsrcDirEntry ResourceDirectoryEntry } func TestParseResourceDirectory(t *testing.T) { tests := []struct { in string out TestRsrcDir }{ { getAbsoluteFilePath("test/putty.exe"), TestRsrcDir{ Level1ImgRsrcDir: ImageResourceDirectory{ Characteristics: 0x0, TimeDateStamp: 0x0, MajorVersion: 0x0, MinorVersion: 0x0, NumberOfNamedEntries: 0x0, NumberOfIDEntries: 0x6, }, Level2Index: 0x3, Level2ImgRsrcDir: ImageResourceDirectory{ Characteristics: 0x0, TimeDateStamp: 0x0, MajorVersion: 0x0, MinorVersion: 0x0, NumberOfNamedEntries: 0x0, NumberOfIDEntries: 0x1, }, Level3Index: 0x0, Level3ImgRsrcDir: ImageResourceDirectory{ Characteristics: 0x0, TimeDateStamp: 0x0, MajorVersion: 0x0, MinorVersion: 0x0, NumberOfNamedEntries: 0x0, NumberOfIDEntries: 0x1, }, Level4Index: 0x0, Level4RsrcDirEntry: ResourceDirectoryEntry{ Struct: ImageResourceDirectoryEntry{ Name: 0x409, OffsetToData: 0x460, }, Name: "", ID: 0x409, IsResourceDir: false, Data: ResourceDataEntry{ Lang: 0x9, SubLang: 0x1, Struct: ImageResourceDataEntry{ OffsetToData: 0x124838, Size: 0x324, CodePage: 0x0, Reserved: 0x0, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } var va, size uint32 if file.Is64 { oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64) dirEntry := oh64.DataDirectory[ImageDirectoryEntryResource] va = dirEntry.VirtualAddress size = dirEntry.Size } else { oh32 := 
file.NtHeader.OptionalHeader.(ImageOptionalHeader32) dirEntry := oh32.DataDirectory[ImageDirectoryEntryResource] va = dirEntry.VirtualAddress size = dirEntry.Size } err = file.parseResourceDirectory(va, size) if err != nil { t.Fatalf("parseResourceDirectory(%s) failed, reason: %v", tt.in, err) } rsrc := file.Resources if rsrc.Struct != tt.out.Level1ImgRsrcDir { t.Fatalf("level 1 resource directory assertion failed, got %v, want %v", rsrc.Struct, tt.out.Level1ImgRsrcDir) } rsrcDirLevel2 := rsrc.Entries[tt.out.Level2Index].Directory if rsrcDirLevel2.Struct != tt.out.Level2ImgRsrcDir { t.Fatalf("level 2 resource directory assertion failed, got %v, want %v", rsrc.Struct, tt.out.Level2ImgRsrcDir) } rsrcDirLevel3 := rsrcDirLevel2.Entries[tt.out.Level3Index].Directory if rsrcDirLevel3.Struct != tt.out.Level3ImgRsrcDir { t.Fatalf("level 3 resource directory assertion failed, got %v, want %v", rsrc.Struct, tt.out.Level3ImgRsrcDir) } rsrcDirEntry := rsrcDirLevel3.Entries[tt.out.Level4Index] if !reflect.DeepEqual(rsrcDirEntry, tt.out.Level4RsrcDirEntry) { t.Fatalf("level 3 resource directory entry assertion failed, got %v, want %v", rsrc.Struct, tt.out.Level3ImgRsrcDir) } }) } } func TestResourceTypeString(t *testing.T) { tests := []struct { in ResourceType out string }{ { RTCursor, "Cursor", }, { ResourceType(0xff), "?", }, } for _, tt := range tests { t.Run(tt.out, func(t *testing.T) { rsrcTypeString := tt.in.String() if rsrcTypeString != tt.out { t.Fatalf("resource type string conversion failed, got %v, want %v", rsrcTypeString, tt.out) } }) } } func TestResourceLangString(t *testing.T) { tests := []struct { in ResourceLang out string }{ { LangArabic, "Arabic (ar)", }, { ResourceLang(0xffff), "?", }, } for _, tt := range tests { t.Run(tt.out, func(t *testing.T) { rsrcLangString := tt.in.String() if rsrcLangString != tt.out { t.Fatalf("resource language string conversion failed, got %v, want %v", rsrcLangString, tt.out) } }) } } func TestResourceSubLangString(t *testing.T) 
{ tests := []struct { in ResourceSubLang out string }{ { SubLangArabicMorocco, "Arabic Morocco (ar-MA)", }, { ResourceSubLang(0xffff), "?", }, } for _, tt := range tests { t.Run(tt.out, func(t *testing.T) { rsrcSubLangString := tt.in.String() if rsrcSubLangString != tt.out { t.Fatalf("resource sub-language string conversion failed, got %v, want %v", rsrcSubLangString, tt.out) } }) } } func TestPrettyResourceLang(t *testing.T) { type resourceLang struct { lang ResourceLang subLang int } tests := []struct { in resourceLang out string }{ { resourceLang{ lang: LangEnglish, subLang: 0x1, }, "English United States (en-US)", }, { resourceLang{ lang: ResourceLang(0xff), subLang: 0x1, }, "?", }, } for _, tt := range tests { t.Run(tt.out, func(t *testing.T) { prettyRsrcLang := PrettyResourceLang(tt.in.lang, tt.in.subLang) if prettyRsrcLang != tt.out { t.Fatalf("pretty resource language failed, got %v, want %v", prettyRsrcLang, tt.out) } }) } } ================================================ FILE: richheader.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "bytes" "crypto/md5" "encoding/binary" "fmt" ) const ( // DansSignature ('DanS' as dword) is where the rich header struct starts. DansSignature = 0x536E6144 // RichSignature ('0x68636952' as dword) is where the rich header struct ends. RichSignature = "Rich" // AnoDansSigNotFound is reported when rich header signature was found, but AnoDansSigNotFound = "Rich Header found, but could not locate DanS " + "signature" // AnoPaddingDwordNotZero is reported when rich header signature leading // padding DWORDs are not equal to 0. AnoPaddingDwordNotZero = "Rich header found: 3 leading padding DWORDs " + "not found after DanS signature" ) // CompID represents the `@comp.id` structure. 
type CompID struct { // The minor version information for the compiler used when building the product. MinorCV uint16 `json:"minor_compiler_version"` // Provides information about the identity or type of the objects used to // build the PE32. ProdID uint16 `json:"product_id"` // Indicates how often the object identified by the former two fields is // referenced by this PE32 file. Count uint32 `json:"count"` // The raw @comp.id structure (unmasked). Unmasked uint32 `json:"unmasked"` } // RichHeader is a structure that is written right after the MZ DOS header. // It consists of pairs of 4-byte integers. And it is also // encrypted using a simple XOR operation using the checksum as the key. // The data between the magic values encodes the ‘bill of materials’ that were // collected by the linker to produce the binary. type RichHeader struct { XORKey uint32 `json:"xor_key"` CompIDs []CompID `json:"comp_ids"` DansOffset int `json:"dans_offset"` Raw []byte `json:"raw"` } // ParseRichHeader parses the rich header struct. func (pe *File) ParseRichHeader() error { rh := RichHeader{} ntHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader richSigOffset := bytes.Index(pe.data[:ntHeaderOffset], []byte(RichSignature)) // For example, .NET executable files do not use the MSVC linker and these // executables do not contain a detectable Rich Header. if richSigOffset < 0 { return nil } // The DWORD following the "Rich" sequence is the XOR key stored by and // calculated by the linker. It is actually a checksum of the DOS header with // the e_lfanew zeroed out, and additionally includes the values of the // unencrypted "Rich" array. Using a checksum with encryption will not only // obfuscate the values, but it also serves as a rudimentary digital // signature. If the checksum is calculated from scratch once the values // have been decrypted, but doesn't match the stored key, it can be assumed // the structure had been tampered with. 
For those that go the extra step to // recalculate the checksum/key, this simple protection mechanism can be bypassed. rh.XORKey = binary.LittleEndian.Uint32(pe.data[richSigOffset+4:]) // To decrypt the array, start with the DWORD just prior to the `Rich` sequence // and XOR it with the key. Continue the loop backwards, 4 bytes at a time, // until the sequence `DanS` is decrypted. var decRichHeader []uint32 dansSigOffset := -1 estimatedBeginDans := richSigOffset - 4 - binary.Size(ImageDOSHeader{}) for it := 0; it < estimatedBeginDans; it += 4 { buff := binary.LittleEndian.Uint32(pe.data[richSigOffset-4-it:]) res := buff ^ rh.XORKey if res == DansSignature { dansSigOffset = richSigOffset - it - 4 break } decRichHeader = append(decRichHeader, res) } // Probe we successfuly found the `DanS` magic. if dansSigOffset == -1 { pe.Anomalies = append(pe.Anomalies, AnoDansSigNotFound) return nil } // Anomaly check: dansSigOffset is usually found in offset 0x80. if dansSigOffset != 0x80 { pe.Anomalies = append(pe.Anomalies, AnoDanSMagicOffset) } rh.DansOffset = dansSigOffset rh.Raw = pe.data[dansSigOffset : richSigOffset+8] // Reverse the decrypted rich header for i, j := 0, len(decRichHeader)-1; i < j; i, j = i+1, j-1 { decRichHeader[i], decRichHeader[j] = decRichHeader[j], decRichHeader[i] } // After the `DanS` signature, there are some zero-padded In practice, // Microsoft seems to have wanted the entries to begin on a 16-byte // (paragraph) boundary, so the 3 leading padding DWORDs can be safely // skipped as not belonging to the data. if decRichHeader[0] != 0 || decRichHeader[1] != 0 || decRichHeader[2] != 0 { pe.Anomalies = append(pe.Anomalies, AnoPaddingDwordNotZero) } // The array stores entries that are 8-bytes each, broken into 3 members. // Each entry represents either a tool that was employed as part of building // the executable or a statistic. // The @compid struct should be multiple of 8 (bytes), some malformed pe // files have incorrect number of entries. 
var lenCompIDs int if (len(decRichHeader)-3)%2 != 0 { lenCompIDs = len(decRichHeader) - 1 } else { lenCompIDs = len(decRichHeader) } for i := 3; i < lenCompIDs; i += 2 { cid := CompID{} compid := make([]byte, binary.Size(cid)) binary.LittleEndian.PutUint32(compid, decRichHeader[i]) binary.LittleEndian.PutUint32(compid[4:], decRichHeader[i+1]) buf := bytes.NewReader(compid) err := binary.Read(buf, binary.LittleEndian, &cid) if err != nil { return err } cid.Unmasked = binary.LittleEndian.Uint32(compid) rh.CompIDs = append(rh.CompIDs, cid) } pe.RichHeader = rh pe.HasRichHdr = true checksum := pe.RichHeaderChecksum() if checksum != rh.XORKey { pe.Anomalies = append(pe.Anomalies, "Invalid rich header checksum") } return nil } // RichHeaderChecksum calculate the Rich Header checksum. func (pe *File) RichHeaderChecksum() uint32 { checksum := uint32(pe.RichHeader.DansOffset) // First, calculate the sum of the DOS header bytes each rotated left the // number of times their position relative to the start of the DOS header e.g. // second byte is rotated left 2x using rol operation. for i := 0; i < pe.RichHeader.DansOffset; i++ { // skip over dos e_lfanew field at offset 0x3C if i >= 0x3C && i < 0x40 { continue } b := uint32(pe.data[i]) checksum += ((b << (i % 32)) | (b>>(32-(i%32)))&0xff) checksum &= 0xFFFFFFFF } // Next, take summation of each Rich header entry by combining its ProductId // and BuildNumber into a single 32 bit number and rotating by its count. for _, compid := range pe.RichHeader.CompIDs { checksum += (compid.Unmasked<<(compid.Count%32) | compid.Unmasked>>(32-(compid.Count%32))) checksum &= 0xFFFFFFFF } return checksum } // RichHeaderHash calculate the Rich Header hash. 
func (pe *File) RichHeaderHash() string { if !pe.HasRichHdr { return "" } richIndex := bytes.Index(pe.RichHeader.Raw, []byte(RichSignature)) if richIndex == -1 { return "" } key := make([]byte, 4) binary.LittleEndian.PutUint32(key, pe.RichHeader.XORKey) rawData := pe.RichHeader.Raw[:richIndex] clearData := make([]byte, len(rawData)) for idx, val := range rawData { clearData[idx] = val ^ key[idx%len(key)] } return fmt.Sprintf("%x", md5.Sum(clearData)) } // ProdIDtoStr maps product ids to MS internal names. // list from: https://github.com/kirschju/richheader func ProdIDtoStr(prodID uint16) string { prodIDtoStrMap := map[uint16]string{ 0x0000: "Unknown", 0x0001: "Import0", 0x0002: "Linker510", 0x0003: "Cvtomf510", 0x0004: "Linker600", 0x0005: "Cvtomf600", 0x0006: "Cvtres500", 0x0007: "Utc11_Basic", 0x0008: "Utc11_C", 0x0009: "Utc12_Basic", 0x000a: "Utc12_C", 0x000b: "Utc12_CPP", 0x000c: "AliasObj60", 0x000d: "VisualBasic60", 0x000e: "Masm613", 0x000f: "Masm710", 0x0010: "Linker511", 0x0011: "Cvtomf511", 0x0012: "Masm614", 0x0013: "Linker512", 0x0014: "Cvtomf512", 0x0015: "Utc12_C_Std", 0x0016: "Utc12_CPP_Std", 0x0017: "Utc12_C_Book", 0x0018: "Utc12_CPP_Book", 0x0019: "Implib700", 0x001a: "Cvtomf700", 0x001b: "Utc13_Basic", 0x001c: "Utc13_C", 0x001d: "Utc13_CPP", 0x001e: "Linker610", 0x001f: "Cvtomf610", 0x0020: "Linker601", 0x0021: "Cvtomf601", 0x0022: "Utc12_1_Basic", 0x0023: "Utc12_1_C", 0x0024: "Utc12_1_CPP", 0x0025: "Linker620", 0x0026: "Cvtomf620", 0x0027: "AliasObj70", 0x0028: "Linker621", 0x0029: "Cvtomf621", 0x002a: "Masm615", 0x002b: "Utc13_LTCG_C", 0x002c: "Utc13_LTCG_CPP", 0x002d: "Masm620", 0x002e: "ILAsm100", 0x002f: "Utc12_2_Basic", 0x0030: "Utc12_2_C", 0x0031: "Utc12_2_CPP", 0x0032: "Utc12_2_C_Std", 0x0033: "Utc12_2_CPP_Std", 0x0034: "Utc12_2_C_Book", 0x0035: "Utc12_2_CPP_Book", 0x0036: "Implib622", 0x0037: "Cvtomf622", 0x0038: "Cvtres501", 0x0039: "Utc13_C_Std", 0x003a: "Utc13_CPP_Std", 0x003b: "Cvtpgd1300", 0x003c: "Linker622", 0x003d: "Linker700", 
0x003e: "Export622", 0x003f: "Export700", 0x0040: "Masm700", 0x0041: "Utc13_POGO_I_C", 0x0042: "Utc13_POGO_I_CPP", 0x0043: "Utc13_POGO_O_C", 0x0044: "Utc13_POGO_O_CPP", 0x0045: "Cvtres700", 0x0046: "Cvtres710p", 0x0047: "Linker710p", 0x0048: "Cvtomf710p", 0x0049: "Export710p", 0x004a: "Implib710p", 0x004b: "Masm710p", 0x004c: "Utc1310p_C", 0x004d: "Utc1310p_CPP", 0x004e: "Utc1310p_C_Std", 0x004f: "Utc1310p_CPP_Std", 0x0050: "Utc1310p_LTCG_C", 0x0051: "Utc1310p_LTCG_CPP", 0x0052: "Utc1310p_POGO_I_C", 0x0053: "Utc1310p_POGO_I_CPP", 0x0054: "Utc1310p_POGO_O_C", 0x0055: "Utc1310p_POGO_O_CPP", 0x0056: "Linker624", 0x0057: "Cvtomf624", 0x0058: "Export624", 0x0059: "Implib624", 0x005a: "Linker710", 0x005b: "Cvtomf710", 0x005c: "Export710", 0x005d: "Implib710", 0x005e: "Cvtres710", 0x005f: "Utc1310_C", 0x0060: "Utc1310_CPP", 0x0061: "Utc1310_C_Std", 0x0062: "Utc1310_CPP_Std", 0x0063: "Utc1310_LTCG_C", 0x0064: "Utc1310_LTCG_CPP", 0x0065: "Utc1310_POGO_I_C", 0x0066: "Utc1310_POGO_I_CPP", 0x0067: "Utc1310_POGO_O_C", 0x0068: "Utc1310_POGO_O_CPP", 0x0069: "AliasObj710", 0x006a: "AliasObj710p", 0x006b: "Cvtpgd1310", 0x006c: "Cvtpgd1310p", 0x006d: "Utc1400_C", 0x006e: "Utc1400_CPP", 0x006f: "Utc1400_C_Std", 0x0070: "Utc1400_CPP_Std", 0x0071: "Utc1400_LTCG_C", 0x0072: "Utc1400_LTCG_CPP", 0x0073: "Utc1400_POGO_I_C", 0x0074: "Utc1400_POGO_I_CPP", 0x0075: "Utc1400_POGO_O_C", 0x0076: "Utc1400_POGO_O_CPP", 0x0077: "Cvtpgd1400", 0x0078: "Linker800", 0x0079: "Cvtomf800", 0x007a: "Export800", 0x007b: "Implib800", 0x007c: "Cvtres800", 0x007d: "Masm800", 0x007e: "AliasObj800", 0x007f: "PhoenixPrerelease", 0x0080: "Utc1400_CVTCIL_C", 0x0081: "Utc1400_CVTCIL_CPP", 0x0082: "Utc1400_LTCG_MSIL", 0x0083: "Utc1500_C", 0x0084: "Utc1500_CPP", 0x0085: "Utc1500_C_Std", 0x0086: "Utc1500_CPP_Std", 0x0087: "Utc1500_CVTCIL_C", 0x0088: "Utc1500_CVTCIL_CPP", 0x0089: "Utc1500_LTCG_C", 0x008a: "Utc1500_LTCG_CPP", 0x008b: "Utc1500_LTCG_MSIL", 0x008c: "Utc1500_POGO_I_C", 0x008d: "Utc1500_POGO_I_CPP", 0x008e: 
"Utc1500_POGO_O_C", 0x008f: "Utc1500_POGO_O_CPP", 0x0090: "Cvtpgd1500", 0x0091: "Linker900", 0x0092: "Export900", 0x0093: "Implib900", 0x0094: "Cvtres900", 0x0095: "Masm900", 0x0096: "AliasObj900", 0x0097: "Resource", 0x0098: "AliasObj1000", 0x0099: "Cvtpgd1600", 0x009a: "Cvtres1000", 0x009b: "Export1000", 0x009c: "Implib1000", 0x009d: "Linker1000", 0x009e: "Masm1000", 0x009f: "Phx1600_C", 0x00a0: "Phx1600_CPP", 0x00a1: "Phx1600_CVTCIL_C", 0x00a2: "Phx1600_CVTCIL_CPP", 0x00a3: "Phx1600_LTCG_C", 0x00a4: "Phx1600_LTCG_CPP", 0x00a5: "Phx1600_LTCG_MSIL", 0x00a6: "Phx1600_POGO_I_C", 0x00a7: "Phx1600_POGO_I_CPP", 0x00a8: "Phx1600_POGO_O_C", 0x00a9: "Phx1600_POGO_O_CPP", 0x00aa: "Utc1600_C", 0x00ab: "Utc1600_CPP", 0x00ac: "Utc1600_CVTCIL_C", 0x00ad: "Utc1600_CVTCIL_CPP", 0x00ae: "Utc1600_LTCG_C", 0x00af: "Utc1600_LTCG_CPP", 0x00b0: "Utc1600_LTCG_MSIL", 0x00b1: "Utc1600_POGO_I_C", 0x00b2: "Utc1600_POGO_I_CPP", 0x00b3: "Utc1600_POGO_O_C", 0x00b4: "Utc1600_POGO_O_CPP", 0x00b5: "AliasObj1010", 0x00b6: "Cvtpgd1610", 0x00b7: "Cvtres1010", 0x00b8: "Export1010", 0x00b9: "Implib1010", 0x00ba: "Linker1010", 0x00bb: "Masm1010", 0x00bc: "Utc1610_C", 0x00bd: "Utc1610_CPP", 0x00be: "Utc1610_CVTCIL_C", 0x00bf: "Utc1610_CVTCIL_CPP", 0x00c0: "Utc1610_LTCG_C", 0x00c1: "Utc1610_LTCG_CPP", 0x00c2: "Utc1610_LTCG_MSIL", 0x00c3: "Utc1610_POGO_I_C", 0x00c4: "Utc1610_POGO_I_CPP", 0x00c5: "Utc1610_POGO_O_C", 0x00c6: "Utc1610_POGO_O_CPP", 0x00c7: "AliasObj1100", 0x00c8: "Cvtpgd1700", 0x00c9: "Cvtres1100", 0x00ca: "Export1100", 0x00cb: "Implib1100", 0x00cc: "Linker1100", 0x00cd: "Masm1100", 0x00ce: "Utc1700_C", 0x00cf: "Utc1700_CPP", 0x00d0: "Utc1700_CVTCIL_C", 0x00d1: "Utc1700_CVTCIL_CPP", 0x00d2: "Utc1700_LTCG_C", 0x00d3: "Utc1700_LTCG_CPP", 0x00d4: "Utc1700_LTCG_MSIL", 0x00d5: "Utc1700_POGO_I_C", 0x00d6: "Utc1700_POGO_I_CPP", 0x00d7: "Utc1700_POGO_O_C", 0x00d8: "Utc1700_POGO_O_CPP", 0x00d9: "AliasObj1200", 0x00da: "Cvtpgd1800", 0x00db: "Cvtres1200", 0x00dc: "Export1200", 0x00dd: "Implib1200", 
0x00de: "Linker1200", 0x00df: "Masm1200", 0x00e0: "Utc1800_C", 0x00e1: "Utc1800_CPP", 0x00e2: "Utc1800_CVTCIL_C", 0x00e3: "Utc1800_CVTCIL_CPP", 0x00e4: "Utc1800_LTCG_C", 0x00e5: "Utc1800_LTCG_CPP", 0x00e6: "Utc1800_LTCG_MSIL", 0x00e7: "Utc1800_POGO_I_C", 0x00e8: "Utc1800_POGO_I_CPP", 0x00e9: "Utc1800_POGO_O_C", 0x00ea: "Utc1800_POGO_O_CPP", 0x00eb: "AliasObj1210", 0x00ec: "Cvtpgd1810", 0x00ed: "Cvtres1210", 0x00ee: "Export1210", 0x00ef: "Implib1210", 0x00f0: "Linker1210", 0x00f1: "Masm1210", 0x00f2: "Utc1810_C", 0x00f3: "Utc1810_CPP", 0x00f4: "Utc1810_CVTCIL_C", 0x00f5: "Utc1810_CVTCIL_CPP", 0x00f6: "Utc1810_LTCG_C", 0x00f7: "Utc1810_LTCG_CPP", 0x00f8: "Utc1810_LTCG_MSIL", 0x00f9: "Utc1810_POGO_I_C", 0x00fa: "Utc1810_POGO_I_CPP", 0x00fb: "Utc1810_POGO_O_C", 0x00fc: "Utc1810_POGO_O_CPP", 0x00fd: "AliasObj1400", 0x00fe: "Cvtpgd1900", 0x00ff: "Cvtres1400", 0x0100: "Export1400", 0x0101: "Implib1400", 0x0102: "Linker1400", 0x0103: "Masm1400", 0x0104: "Utc1900_C", 0x0105: "Utc1900_CPP", 0x0106: "Utc1900_CVTCIL_C", 0x0107: "Utc1900_CVTCIL_CPP", 0x0108: "Utc1900_LTCG_C", 0x0109: "Utc1900_LTCG_CPP", 0x010a: "Utc1900_LTCG_MSIL", 0x010b: "Utc1900_POGO_I_C", 0x010c: "Utc1900_POGO_I_CPP", 0x010d: "Utc1900_POGO_O_C", 0x010e: "Utc1900_POGO_O_CPP", } if val, ok := prodIDtoStrMap[prodID]; ok { return val } return "?" } // ProdIDtoVSversion retrieves the Visual Studio version from product id. 
// list from: https://github.com/kirschju/richheader func ProdIDtoVSversion(prodID uint16) string { if prodID > 0x010e { return "" } else if prodID >= 0x00fd && prodID < 0x010e+1 { return "Visual Studio 2015 14.00" } else if prodID >= 0x00eb && prodID < 0x00fd { return "Visual Studio 2013 12.10" } else if prodID >= 0x00d9 && prodID < 0x00eb { return "Visual Studio 2013 12.00" } else if prodID >= 0x00c7 && prodID < 0x00d9 { return "Visual Studio 2012 11.00" } else if prodID >= 0x00b5 && prodID < 0x00c7 { return "Visual Studio 2010 10.10" } else if prodID >= 0x0098 && prodID < 0x00b5 { return "Visual Studio 2010 10.00" } else if prodID >= 0x0083 && prodID < 0x0098 { return "Visual Studio 2008 09.00" } else if prodID >= 0x006d && prodID < 0x0083 { return "Visual Studio 2005 08.00" } else if prodID >= 0x005a && prodID < 0x006d { return "Visual Studio 2003 07.10" } else if prodID == 1 { return "Visual Studio" } else { return "" } } ================================================ FILE: richheader_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe import ( "reflect" "testing" ) type TestRichHeader struct { richHeader RichHeader compIDIndex uint8 prettyProdID string VSVersion string checksum uint32 } func TestParseRichHeader(t *testing.T) { tests := []struct { in string out TestRichHeader }{ {getAbsoluteFilePath("test/kernel32.dll"), TestRichHeader{ richHeader: RichHeader{ XORKey: 2796214951, CompIDs: []CompID{ { MinorCV: 27412, ProdID: 257, Count: 4, Unmasked: 16870164, }, { MinorCV: 30729, ProdID: 147, Count: 193, Unmasked: 9664521, }, { MinorCV: 0, ProdID: 1, Count: 1325, Unmasked: 65536, }, { MinorCV: 27412, ProdID: 260, Count: 9, Unmasked: 17066772, }, { MinorCV: 27412, ProdID: 259, Count: 3, Unmasked: 17001236, }, { MinorCV: 27412, ProdID: 256, Count: 1, Unmasked: 16804628, }, { MinorCV: 27412, ProdID: 269, Count: 209, Unmasked: 17656596, }, { MinorCV: 27412, ProdID: 255, Count: 1, Unmasked: 16739092, }, { MinorCV: 27412, ProdID: 258, Count: 1, Unmasked: 16935700, }, }, DansOffset: 128, Raw: []byte{ 0xe3, 0xbb, 0xc4, 0xf5, 0xa7, 0xda, 0xaa, 0xa6, 0xa7, 0xda, 0xaa, 0xa6, 0xa7, 0xda, 0xaa, 0xa6, 0xb3, 0xb1, 0xab, 0xa7, 0xa3, 0xda, 0xaa, 0xa6, 0xae, 0xa2, 0x39, 0xa6, 0x66, 0xda, 0xaa, 0xa6, 0xa7, 0xda, 0xab, 0xa6, 0x8a, 0xdf, 0xaa, 0xa6, 0xb3, 0xb1, 0xae, 0xa7, 0xae, 0xda, 0xaa, 0xa6, 0xb3, 0xb1, 0xa9, 0xa7, 0xa4, 0xda, 0xaa, 0xa6, 0xb3, 0xb1, 0xaa, 0xa7, 0xa6, 0xda, 0xaa, 0xa6, 0xb3, 0xb1, 0xa7, 0xa7, 0x76, 0xda, 0xaa, 0xa6, 0xb3, 0xb1, 0x55, 0xa6, 0xa6, 0xda, 0xaa, 0xa6, 0xb3, 0xb1, 0xa8, 0xa7, 0xa6, 0xda, 0xaa, 0xa6, 0x52, 0x69, 0x63, 0x68, 0xa7, 0xda, 0xaa, 0xa6}, }, compIDIndex: 3, prettyProdID: "Utc1900_C", VSVersion: "Visual Studio 2015 14.00", checksum: 0xa6aadaa7, }}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } richHeader := file.RichHeader if 
!reflect.DeepEqual(richHeader, tt.out.richHeader) { t.Errorf("rich header test failed, got %v, want %v", richHeader, tt.out) } prodID := richHeader.CompIDs[tt.out.compIDIndex].ProdID prettyProdID := ProdIDtoStr(prodID) if prettyProdID != tt.out.prettyProdID { t.Errorf("rich header pretty prod ID failed, got %v, want %v", prettyProdID, tt.out.prettyProdID) } VSVersion := ProdIDtoVSversion(prodID) if VSVersion != tt.out.VSVersion { t.Errorf("rich header VS verion of prod ID failed, got %v, want %v", VSVersion, tt.out.VSVersion) } checksum := file.RichHeaderChecksum() if checksum != tt.out.checksum { t.Errorf("rich header checksum failed, got %v, want %v", checksum, tt.out.checksum) } }) } } func TestRichHeaderHash(t *testing.T) { tests := []struct { in string out string }{ {getAbsoluteFilePath("test/kernel32.dll"), "4549320af6790d410f09ddc3bab86c86"}, {getAbsoluteFilePath("test/WdBoot.sys"), "3cbccbf62a2a6a8066a5c9d294c90948"}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } got := file.RichHeaderHash() if string(got) != tt.out { t.Errorf("Authentihash(%s) got %v, want %v", tt.in, got, tt.out) } }) } } ================================================ FILE: scripts/extract-rsrc-lang.py ================================================ # Text file containing languages and sub-languages extracted from: # Language Identifier Constants and Strings for Microsoft Windows doc. spec = "ms-lcid.txt" class Language: language = "" originalLanguage = "" id = 0 tag = "" isSubLang = False def __str__(self) -> str: return f"{self.originalLanguage} : {self.id} : {self.tag}" def sanitize_lang(language): language = language.replace(".", "") # example: U.A.E. 
language = language.replace("(", "") # example: (Latin) language = language.replace(")", "") # example: (Latin) language = language.replace("'", "") # example: People's Republic of China language = language.replace("[", "") # example: Cocos [Keeling] Islands language = language.replace("]", "") # example: Cocos [Keeling] Islands language = language.replace("-", "") # example: Guinea-Bissau language = language.replace("/", "") # example: # Pseudo locale for east Asian/complex script localization testing language = language.replace(" ", "") # example: Congo, DRC language = language.replace(",", "") # example: Congo, DRC return language def read_lang_ids(filename): lines = [] with open(filename, 'r', encoding="utf-8") as f: lines = f.readlines() lang_ids = [] for line in lines: elements = line.split() lang_ids.append(elements[0]) return lang_ids def parse_txt_file(filename, lang_ids): lines = [] with open(filename, 'r', encoding="utf-8") as f: lines = f.readlines() languages = [] for line in lines: lang = Language() line = line.strip() elements = line.split() lang.tag = elements[-1] lang.id = elements[-2] if "-" not in lang.tag: lang.isSubLang = False else: if not lang.id in lang_ids: lang.isSubLang = True i = 0 while i < len(elements) - 2: for letter in ["(", "["]: if elements[i].startswith(letter): # Capitalize words so golang is happy. lang.originalLanguage += letter + elements[i][1:].capitalize() + " " break else: lang.originalLanguage += elements[i].capitalize() + " " break i += 1 begin = lang.originalLanguage.find("-") if begin > 0: lang.originalLanguage = lang.originalLanguage[:begin+1] + \ lang.originalLanguage[begin+1:begin+3].capitalize() + lang.originalLanguage[begin+3:] # Strip the last whitespace. lang.originalLanguage = lang.originalLanguage[:-1] lang.language = sanitize_lang(lang.originalLanguage) # Skip unsupported locals. 
if lang.id == "0x1000": print (f"skipping {lang}") continue languages.append(lang) return languages def generate_go_code(languages : list[Language]): code = "" # Generate langs constants for lang in languages: if lang.isSubLang: continue else: code += f"// {lang.originalLanguage} ({lang.tag})\n" code += f"Lang{lang.language} ResourceLang = {lang.id}\n" # Generate sub-langs constants i = 0 for lang in languages: if lang.isSubLang: code += f"// {lang.originalLanguage} ({lang.tag})\n" code += f"SubLang{lang.language}\n" i += 1 return code def generate_lang_string(languages : list[Language]): code = "" for lang in languages: if lang.isSubLang: continue code += f'Lang{lang.language} : "{lang.originalLanguage} ({lang.tag})",\n' return code def generate_sub_lang_string(languages : list[Language]): code = "" for lang in languages: if not lang.isSubLang: continue code += f'SubLang{lang.language} : "{lang.originalLanguage} ({lang.tag})",\n' return code def generate_lang_sub_lang_map_string(languages : list[Language]): code = "" curly_bracket_is_open = False # The following tags don't have a location. 
ignore_list = ["0x0476", "0x05FE", "0x0501", "0x09FF", "0x043D", "0x0471", "0x045F", "0x7C67"] for lang in languages: if lang.id in ignore_list: continue if not lang.isSubLang: if curly_bracket_is_open: code += f"}},\n" code += f"Lang{lang.language} : {{\n" curly_bracket_is_open = True else: id = int(lang.id, 0) >> 10 code += f' 0x{id:x} : SubLang{lang.language}.String(),\n' return code def write_generated_code(code, filename): with open(filename, "w", encoding="utf-8") as f: f.write(code) if __name__ == "__main__": lang_ids = read_lang_ids("lang_ids.txt") languages = parse_txt_file(spec, lang_ids) code = generate_go_code(languages) write_generated_code(code, "out.txt") code = generate_lang_string(languages) langs = write_generated_code(code, "langs.txt") code = generate_sub_lang_string(languages) langs = write_generated_code(code, "sub_langs.txt") code = generate_lang_sub_lang_map_string(languages) langs = write_generated_code(code, "map.txt") ================================================ FILE: scripts/ms-lcid.txt ================================================ Afar 0x1000 aa Afar Djibouti 0x1000 aa-DJ Afar Eritrea 0x1000 aa-ER Afar Ethiopia 0x1000 aa-ET Afrikaans 0x0036 af Afrikaans Namibia 0x1000 af-NA Afrikaans South Africa 0x0436 af-ZA Aghem 0x1000 agq Aghem Cameroon 0x1000 agq-CM Akan 0x1000 ak Akan Ghana 0x1000 ak-GH Albanian 0x001C sq Albanian Albania 0x041C sq-AL Albanian North Macedonia 0x1000 sq-MK Alsatian 0x0084 gsw Alsatian France 0x0484 gsw-FR Alsatian Liechtenstein 0x1000 gsw-LI Alsatian Switzerland 0x1000 gsw-CH Amharic 0x005E am Amharic Ethiopia 0x045E am-ET Arabic 0x0001 ar Arabic Algeria 0x1401 ar-DZ Arabic Bahrain 0x3C01 ar-BH Arabic Chad 0x1000 ar-TD Arabic Comoros 0x1000 ar-KM Arabic Djibouti 0x1000 ar-DJ Arabic Egypt 0x0c01 ar-EG Arabic Eritrea 0x1000 ar-ER Arabic Iraq 0x0801 ar-IQ Arabic Israel 0x1000 ar-IL Arabic Jordan 0x2C01 ar-JO Arabic Kuwait 0x3401 ar-KW Arabic Lebanon 0x3001 ar-LB Arabic Libya 0x1001 ar-LY Arabic Mauritania 
0x1000 ar-MR Arabic Morocco 0x1801 ar-MA Arabic Oman 0x2001 ar-OM Arabic Palestinian Authority 0x1000 ar-PS Arabic Qatar 0x4001 ar-QA Arabic Saudi Arabia 0x0401 ar-SA Arabic Somalia 0x1000 ar-SO Arabic South Sudan 0x1000 ar-SS Arabic Sudan 0x1000 ar-SD Arabic Syria 0x2801 ar-SY Arabic Tunisia 0x1C01 ar-TN Arabic U.A.E. 0x3801 ar-AE Arabic World 0x1000 ar-001 Arabic Yemen 0x2401 ar-YE Armenian 0x002B hy Armenian Armenia 0x042B hy-AM Assamese 0x004D as Assamese India 0x044D as-IN Asturian 0x1000 ast Asturian Spain 0x1000 ast-ES Asu 0x1000 asa Asu Tanzania 0x1000 asa-TZ Azerbaijani (Cyrillic) 0x742C az-Cyrl Azerbaijani (Cyrillic) Azerbaijan 0x082C az-Cyrl-AZ Azerbaijani (Latin) 0x002C az Azerbaijani (Latin) 0x782C az-Latn Azerbaijani (Latin) Azerbaijan 0x042C az-Latn-AZ Bafia 0x1000 ksf Bafia Cameroon 0x1000 ksf-CM Bamanankan 0x1000 bm Bamanankan (Latin) Mali 0x1000 bm-Latn-ML Bangla 0x0045 bn Bangla Bangladesh 0x0845 bn-BD Bangla India 0x0445 bn-IN Basaa 0x1000 bas Basaa Cameroon 0x1000 bas-CM Bashkir 0x006D ba Bashkir Russia 0x046D ba-RU Basque 0x002D eu Basque Spain 0x042D eu-ES Belarusian 0x0023 be Belarusian Belarus 0x0423 be-BY Bemba 0x1000 bem Bemba Zambia 0x1000 bem-ZM Bena 0x1000 bez Bena Tanzania 0x1000 bez-TZ Blin 0x1000 byn Blin Eritrea 0x1000 byn-ER Bodo 0x1000 brx Bodo India 0x1000 brx-IN Bosnian (Cyrillic) 0x641A bs-Cyrl Bosnian (Cyrillic) Bosnia and Herzegovina 0x201A bs-Cyrl-BA Bosnian (Latin) 0x681A bs-Latn Bosnian (Latin) 0x781A bs Bosnian (Latin) Bosnia and Herzegovina 0x141A bs-Latn-BA Breton 0x007E br Breton France 0x047E br-FR Bulgarian 0x0002 bg Bulgarian Bulgaria 0x0402 bg-BG Burmese 0x0055 my Burmese Myanmar 0x0455 my-MM Catalan 0x0003 ca Catalan Andorra 0x1000 ca-AD Catalan France 0x1000 ca-FR Catalan Italy 0x1000 ca-IT Catalan Spain 0x0403 ca-ES Cebuano 0x1000 ceb Cebuan (Latin) 0x1000 ceb-Latn Cebuan (Latin) Philippines 0x1000 ceb-Latn-PH Central Atlas Tamazight (Arabic) Morocco 0x045F tzm-ArabMA Central Atlas Tamazight (Latin) Morocco 
0x1000 tzm-LatnMA Central Kurdish 0x0092 ku Central Kurdish 0x7c92 ku-Arab Central Kurdish Iraq 0x0492 ku-Arab-IQ Chakma 0x1000 ccp Chakma Chakma 0x1000 ccp-Cakm Chakma Bangladesh 0x1000 ccp-CakmBD Chakma India 0x1000 ccp-CakmIN Chechen Russia 0x1000 ce-RU Cherokee 0x005C chr Cherokee 0x7c5C chr-Cher Cherokee United States 0x045C chr-Cher-US Chiga 0x1000 cgg Chiga Uganda 0x1000 cgg-UG Chinese (Simplified) 0x0004 zh-Hans Chinese (Simplified) 0x7804 zh Chinese (Simplified) People's Republic of China 0x0804 zh-CN Chinese (Simplified) Singapore 0x1004 zh-SG Chinese (Traditional) 0x7C04 zh-Hant Chinese (Traditional) Hong Kong S.A.R. 0x0C04 zh-HK Chinese (Traditional) Macao S.A.R. 0x1404 zh-MO Chinese (Traditional) Taiwan 0x0404 zh-TW Church Slavic Russia 0x1000 cu-RU Congo Swahili 0x1000 swc Congo Swahili Congo DRC 0x1000 swc-CD Cornish 0x1000 kw Cornish United Kingdom 0x1000 kw-GB Corsican 0x0083 co Corsican France 0x0483 co-FR Croatian 0x001A hr Croatian Croatia 0x041A hr-HR Croatian (Latin) Bosnia and Herzegovina 0x101A hr-BA Czech 0x0005 cs Czech Czech Republic 0x0405 cs-CZ Danish 0x0006 da Danish Denmark 0x0406 da-DK Danish Greenland 0x1000 da-GL Dari 0x008C prs Dari Afghanistan 0x048C prs-AF Divehi 0x0065 dv Divehi Maldives 0x0465 dv-MV Duala 0x1000 dua Duala Cameroon 0x1000 dua-CM Dutch 0x0013 nl Dutch Aruba 0x1000 nl-AW Dutch Belgium 0x0813 nl-BE Dutch Bonaire, Sint Eustatius and Saba 0x1000 nl-BQ Dutch Curaçao 0x1000 nl-CW Dutch Netherlands 0x0413 nl-NL Dutch Sint Maarten 0x1000 nl-SX Dutch Suriname 0x1000 nl-SR Dzongkha 0x1000 dz Dzongkha Bhutan 0x0C51 dz-BT Embu 0x1000 ebu Embu Kenya 0x1000 ebu-KE English 0x0009 en English American Samoa 0x1000 en-AS English Anguilla 0x1000 en-AI English Antigua and Barbuda 0x1000 en-AG English Australia 0x0C09 en-AU English Austria 0x1000 en-AT English Bahamas 0x1000 en-BS English Barbados 0x1000 en-BB English Belgium 0x1000 en-BE English Belize 0x2809 en-BZ English Bermuda 0x1000 en-BM English Botswana 0x1000 en-BW English 
British Indian Ocean Territory 0x1000 en-IO English British Virgin Islands 0x1000 en-VG English Burundi 0x1000 en-BI English Cameroon 0x1000 en-CM English Canada 0x1009 en-CA English Caribbean 0x2409 en-029 English Cayman Islands 0x1000 en-KY English Christmas Island 0x1000 en-CX English Cocos [Keeling] Islands 0x1000 en-CC English Cook Islands 0x1000 en-CK English Cyprus 0x1000 en-CY English Denmark 0x1000 en-DK English Dominica 0x1000 en-DM English Eritrea 0x1000 en-ER English Europe 0x1000 en-150 English Falkland Islands 0x1000 en-FK English Finland 0x1000 en-FI English Fiji 0x1000 en-FJ English Gambia 0x1000 en-GM English Germany 0x1000 en-DE English Ghana 0x1000 en-GH English Gibraltar 0x1000 en-GI English Grenada 0x1000 en-GD English Guam 0x1000 en-GU English Guernsey 0x1000 en-GG English Guyana 0x1000 en-GY English Hong Kong 0x3C09 en-HK English India 0x4009 en-IN English Ireland 0x1809 en-IE English Isle of Man 0x1000 en-IM English Israel 0x1000 en-IL English Jamaica 0x2009 en-JM English Jersey 0x1000 en-JE English Kenya 0x1000 en-KE English Kiribati 0x1000 en-KI English Lesotho 0x1000 en-LS English Liberia 0x1000 en-LR English Macao SAR 0x1000 en-MO English Madagascar 0x1000 en-MG English Malawi 0x1000 en-MW English Malaysia 0x4409 en-MY English Malta 0x1000 en-MT English Marshall Islands 0x1000 en-MH English Mauritius 0x1000 en-MU English Micronesia 0x1000 en-FM English Montserrat 0x1000 en-MS English Namibia 0x1000 en-NA English Nauru 0x1000 en-NR English Netherlands 0x1000 en-NL English New Zealand 0x1409 en-NZ English Nigeria 0x1000 en-NG English Niue 0x1000 en-NU English Norfolk Island 0x1000 en-NF English Northern Mariana Islands 0x1000 en-MP English Pakistan 0x1000 en-PK English Palau 0x1000 en-PW English Papua New Guinea 0x1000 en-PG English Pitcairn Islands 0x1000 en-PN English Puerto Rico 0x1000 en-PR English Republic of the Philippines 0x3409 en-PH English Rwanda 0x1000 en-RW English Saint Kitts and Nevis 0x1000 en-KN English Saint Lucia 0x1000 
en-LC English Saint Vincent and the Grenadines 0x1000 en-VC English Samoa 0x1000 en-WS English Seychelles 0x1000 en-SC English Sierra Leone 0x1000 en-SL English Singapore 0x4809 en-SG English Sint Maarten 0x1000 en-SX English Slovenia 0x1000 en-SI English Solomon Islands 0x1000 en-SB English South Africa 0x1C09 en-ZA English South Sudan 0x1000 en-SS English St Helena, Ascension, Tristan da Cunha 0x1000 en-SH English Sudan 0x1000 en-SD English Swaziland 0x1000 en-SZ English Sweden 0x1000 en-SE English Switzerland 0x1000 en-CH English Tanzania 0x1000 en-TZ English Tokelau 0x1000 en-TK English Tonga 0x1000 en-TO English Trinidad and Tobago 0x2c09 en-TT English Turks and Caicos Islands 0x1000 en-TC English Tuvalu 0x1000 en-TV English Uganda 0x1000 en-UG English United Arab Emirates 0x4C09 en-AE English United Kingdom 0x0809 en-GB English United States 0x0409 en-US English US Minor Outlying Islands 0x1000 en-UM English US Virgin Islands 0x1000 en-VI English Vanuatu 0x1000 en-VU English World 0x1000 en-001 English Zambia 0x1000 en-ZM English Zimbabwe 0x3009 en-ZW Esperanto 0x1000 eo Esperanto World 0x1000 eo-001 Estonian 0x0025 et Estonian Estonia 0x0425 et-EE Ewe 0x1000 ee Ewe Ghana 0x1000 ee-GH Ewe Togo 0x1000 ee-TG Ewondo 0x1000 ewo Ewondo Cameroon 0x1000 ewo-CM Faroese 0x0038 fo Faroese Denmark 0x1000 fo-DK Faroese Faroe Islands 0x0438 fo-FO Filipino 0x0064 fil Filipino Philippines 0x0464 fil-PH Finnish 0x000B fi Finnish Finland 0x040B fi-FI French 0x000C fr French Algeria 0x1000 fr-DZ French Belgium 0x080C fr-BE French Benin 0x1000 fr-BJ French Burkina Faso 0x1000 fr-BF French Burundi 0x1000 fr-BI French Cameroon 0x2c0C fr-CM French Canada 0x0c0C fr-CA French Caribbean 0x1C0C fr-029 French Central African Republic 0x1000 fr-CF French Chad 0x1000 fr-TD French Comoros 0x1000 fr-KM French Congo 0x1000 fr-CG French Congo, DRC 0x240C fr-CD French Côte d'Ivoire 0x300C fr-CI French Djibouti 0x1000 fr-DJ French Equatorial Guinea 0x1000 fr-GQ French France 0x040C fr-FR 
French French Guiana 0x1000 fr-GF French French Polynesia 0x1000 fr-PF French Gabon 0x1000 fr-GA French Guadeloupe 0x1000 fr-GP French Guinea 0x1000 fr-GN French Haiti 0x3c0C fr-HT French Luxembourg 0x140C fr-LU French Madagascar 0x1000 fr-MG French Mali 0x340C fr-ML French Martinique 0x1000 fr-MQ French Mauritania 0x1000 fr-MR French Mauritius 0x1000 fr-MU French Mayotte 0x1000 fr-YT French Morocco 0x380C fr-MA French New Caledonia 0x1000 fr-NC French Niger 0x1000 fr-NE French Principality of Monaco 0x180C fr-MC French Reunion 0x200C fr-RE French Rwanda 0x1000 fr-RW French Saint Barthélemy 0x1000 fr-BL French Saint Martin 0x1000 fr-MF French Saint Pierre and Miquelon 0x1000 fr-PM French Senegal 0x280C fr-SN French Seychelles 0x1000 fr-SC French Switzerland 0x100C fr-CH French Syria 0x1000 fr-SY French Togo 0x1000 fr-TG French Tunisia 0x1000 fr-TN French Vanuatu 0x1000 fr-VU French Wallis and Futuna 0x1000 fr-WF Frisian 0x0062 fy Frisian Netherlands 0x0462 fy-NL Friulian 0x1000 fur Friulian Italy 0x1000 fur-IT Fulah 0x0067 ff Fulah (Latin) 0x7C67 ff-Latn Fulah (Latin) Burkina Faso 0x1000 ff-Latn-BF Fulah Cameroon 0x1000 ff-CM Fulah (Latin) Cameroon 0x1000 ff-Latn-CM Fulah (Latin) Gambia 0x1000 ff-Latn-GM Fulah (Latin) Ghana 0x1000 ff-Latn-GH Fulah Guinea 0x1000 ff-GN Fulah (Latin) Guinea 0x1000 ff-Latn-GN Fulah (Latin) Guinea-Bissau 0x1000 ff-Latn-GW Fulah (Latin) Liberia 0x1000 ff-Latn-LR Fulah Mauritania 0x1000 ff-MR Fulah (Latin) Mauritania 0x1000 ff-Latn-MR Fulah (Latin) Niger 0x1000 ff-Latn-NE Fulah Nigeria 0x0467 ff-NG Fulah (Latin) Nigeria 0x0467 ff-Latn-NG Fulah Senegal 0x0867 ff-Latn-SN Fulah (Latin) Sierra Leone 0x1000 ff-Latn-SL Galician 0x0056 gl Galician Spain 0x0456 gl-ES Ganda 0x1000 lg Ganda Uganda 0x1000 lg-UG Georgian 0x0037 ka Georgian Georgia 0x0437 ka-GE German 0x0007 de German Austria 0x0C07 de-AT German Belgium 0x1000 de-BE German Germany 0x0407 de-DE German Italy 0x1000 de-IT German Liechtenstein 0x1407 de-LI German Luxembourg 0x1007 de-LU 
German Switzerland 0x0807 de-CH Greek 0x0008 el Greek Cyprus 0x1000 el-CY Greek Greece 0x0408 el-GR Greenlandic 0x006F kl Greenlandic Greenland 0x046F kl-GL Guarani 0x0074 gn Guarani Paraguay 0x0474 gn-PY Gujarati 0x0047 gu Gujarati India 0x0447 gu-IN Gusii 0x1000 guz Gusii Kenya 0x1000 guz-KE Hausa (Latin) 0x0068 ha Hausa (Latin) 0x7C68 ha-Latn Hausa (Latin) Ghana 0x1000 ha-Latn-GH Hausa (Latin) Niger 0x1000 ha-Latn-NE Hausa (Latin) Nigeria 0x0468 ha-Latn-NG Hawaiian 0x0075 haw Hawaiian United States 0x0475 haw-US Hebrew 0x000D he Hebrew Israel 0x040D he-IL Hindi 0x0039 hi Hindi India 0x0439 hi-IN Hungarian 0x000E hu Hungarian Hungary 0x040E hu-HU Icelandic 0x000F is Icelandic Iceland 0x040F is-IS Igbo 0x0070 ig Igbo Nigeria 0x0470 ig-NG Indonesian 0x0021 id Indonesian Indonesia 0x0421 id-ID Interlingua 0x1000 ia Interlingua France 0x1000 ia-FR Interlingua World 0x1000 ia-001 Inuktitut (Latin) 0x005D iu Inuktitut (Latin) 0x7C5D iu-Latn Inuktitut (Latin) Canada 0x085D iu-Latn-CA Inuktitut (Syllabics) 0x785D iu-Cans Inuktitut (Syllabics) Canada 0x045d iu-Cans-CA Irish 0x003C ga Irish Ireland 0x083C ga-IE Italian 0x0010 it Italian Italy 0x0410 it-IT Italian San Marino 0x1000 it-SM Italian Switzerland 0x0810 it-CH Italian Vatican City 0x1000 it-VA Japanese 0x0011 ja Japanese Japan 0x0411 ja-JP Javanese 0x1000 jv Javanese Latin 0x1000 jv-Latn Javanese Latin, Indonesia 0x1000 jv-Latn-ID Jola-Fonyi 0x1000 dyo Jola-Fonyi Senegal 0x1000 dyo-SN Kabuverdianu 0x1000 kea Kabuverdianu Cabo Verde 0x1000 kea-CV Kabyle 0x1000 kab Kabyle Algeria 0x1000 kab-DZ Kako 0x1000 kkj Kako Cameroon 0x1000 kkj-CM Kalenjin 0x1000 kln Kalenjin Kenya 0x1000 kln-KE Kamba 0x1000 kam Kamba Kenya 0x1000 kam-KE Kannada 0x004B kn Kannada India 0x044B kn-IN Kanuri (Latin) Nigeria 0x0471 kr-Latn-NG Kashmiri 0x0060 ks Kashmiri Perso-Arabic 0x0460 ks-Arab Kashmiri Perso-Arabic 0x1000 ks-Arab-IN Kashmiri (Devanagari) India 0x0860 ks-Deva-IN Kazakh 0x003F kk Kazakh Kazakhstan 0x043F kk-KZ Khmer 0x0053 km 
Khmer Cambodia 0x0453 km-KH K'iche 0x0086 quc K'iche Guatemala 0x0486 quc-Latn-GT Kikuyu 0x1000 ki Kikuyu Kenya 0x1000 ki-KE Kinyarwanda 0x0087 rw Kinyarwanda Rwanda 0x0487 rw-RW Kiswahili 0x0041 sw Kiswahili Kenya 0x0441 sw-KE Kiswahili Tanzania 0x1000 sw-TZ Kiswahili Uganda 0x1000 sw-UG Konkani 0x0057 kok Konkani India 0x0457 kok-IN Korean 0x0012 ko Korean Korea 0x0412 ko-KR Korean North Korea 0x1000 ko-KP Koyra Chiini 0x1000 khq Koyra Chiini Mali 0x1000 khq-ML Koyraboro Senni 0x1000 ses Koyraboro Senni Mali 0x1000 ses-ML Kwasio 0x1000 nmg Kwasio Cameroon 0x1000 nmg-CM Kyrgyz 0x0040 ky Kyrgyz Kyrgyzstan 0x0440 ky-KG Kurdish Perso-Arabic, Iran 0x1000 ku-Arab-IR Lakota 0x1000 lkt Lakota United States 0x1000 lkt-US Langi 0x1000 lag Langi Tanzania 0x1000 lag-TZ Lao 0x0054 lo Lao Lao P.D.R. 0x0454 lo-LA Latin Vatican City 0x0476 la-VA Latvian 0x0026 lv Latvian Latvia 0x0426 lv-LV Lingala 0x1000 ln Lingala Angola 0x1000 ln-AO Lingala Central African Republic 0x1000 ln-CF Lingala Congo 0x1000 ln-CG Lingala Congo DRC 0x1000 ln-CD Lithuanian 0x0027 lt Lithuanian Lithuania 0x0427 lt-LT Low German 0x1000 nds Low German Germany 0x1000 nds-DE Low German Netherlands 0x1000 nds-NL Lower Sorbian 0x7C2E dsb Lower Sorbian Germany 0x082E dsb-DE Luba-Katanga 0x1000 lu Luba-Katanga Congo DRC 0x1000 lu-CD Luo 0x1000 luo Luo Kenya 0x1000 luo-KE Luxembourgish 0x006E lb Luxembourgish Luxembourg 0x046E lb-LU Luyia 0x1000 luy Luyia Kenya 0x1000 luy-KE Macedonian 0x002F mk Macedonian North Macedonia 0x042F mk-MK Machame 0x1000 jmc Machame Tanzania 0x1000 jmc-TZ Makhuwa-Meetto 0x1000 mgh Makhuwa-Meetto Mozambique 0x1000 mgh-MZ Makonde 0x1000 kde Makonde Tanzania 0x1000 kde-TZ Malagasy 0x1000 mg Malagasy Madagascar 0x1000 mg-MG Malay 0x003E ms Malay Brunei Darussalam 0x083E ms-BN Malay Malaysia 0x043E ms-MY Malayalam 0x004C ml Malayalam India 0x044C ml-IN Maltese 0x003A mt Maltese Malta 0x043A mt-MT Manx 0x1000 gv Manx Isle of Man 0x1000 gv-IM Maori 0x0081 mi Maori New Zealand 0x0481 mi-NZ 
Mapudungun 0x007A arn Mapudungun Chile 0x047A arn-CL Marathi 0x004E mr Marathi India 0x044E mr-IN Masai 0x1000 mas Masai Kenya 0x1000 mas-KE Masai Tanzania 0x1000 mas-TZ Mazanderani Iran 0x1000 mzn-IR Meru 0x1000 mer Meru Kenya 0x1000 mer-KE Meta' 0x1000 mgo Meta' Cameroon 0x1000 mgo-CM Mohawk 0x007C moh Mohawk Canada 0x047C moh-CA Mongolian (Cyrillic) 0x0050 mn Mongolian (Cyrillic) 0x7850 mn-Cyrl Mongolian (Cyrillic) Mongolia 0x0450 mn-MN Mongolian (Traditional Mongolian) 0x7C50 mn-Mong Mongolian (Traditional Mongolian) People's Republic of China 0x0850 mn-MongCN Mongolian (Traditional Mongolian) Mongolia 0x0C50 mn-MongMN Morisyen 0x1000 mfe Morisyen Mauritius 0x1000 mfe-MU Mundang 0x1000 mua Mundang Cameroon 0x1000 mua-CM N'ko 0x1000 nqo N'ko Guinea 0x1000 nqo-GN Nama 0x1000 naq Nama Namibia 0x1000 naq-NA Nepali 0x0061 ne Nepali India 0x0861 ne-IN Nepali Nepal 0x0461 ne-NP Ngiemboon 0x1000 nnh Ngiemboon Cameroon 0x1000 nnh-CM Ngomba 0x1000 jgo Ngomba Cameroon 0x1000 jgo-CM Northern Luri Iraq 0x1000 lrc-IQ Northern Luri Iran 0x1000 lrc-IR North Ndebele 0x1000 nd North Ndebele Zimbabwe 0x1000 nd-ZW Norwegian (Bokmal) 0x0014 no Norwegian (Bokmal) 0x7C14 nb Norwegian (Bokmal) Norway 0x0414 nb-NO Norwegian (Nynorsk) 0x7814 nn Norwegian (Nynorsk) Norway 0x0814 nn-NO Norwegian Bokmål Svalbard and Jan Mayen 0x1000 nb-SJ Nuer 0x1000 nus Nuer Sudan 0x1000 nus-SD Nuer South Sudan 0x1000 nus-SS Nyankole 0x1000 nyn Nyankole Uganda 0x1000 nyn-UG Occitan 0x0082 oc Occitan France 0x0482 oc-FR Odia 0x0048 or Odia India 0x0448 or-IN Oromo 0x0072 om Oromo Ethiopia 0x0472 om-ET Oromo Kenya 0x1000 om-KE Ossetian 0x1000 os Ossetian Cyrillic, Georgia 0x1000 os-GE Ossetian Cyrillic, Russia 0x1000 os-RU Pashto 0x0063 ps Pashto Afghanistan 0x0463 ps-AF Pashto Pakistan 0x1000 ps-PK Persian 0x0029 fa Persian Afghanistan 0x1000 fa-AF Persian Iran 0x0429 fa-IR Polish 0x0015 pl Polish Poland 0x0415 pl-PL Portuguese 0x0016 pt Portuguese Angola 0x1000 pt-AO Portuguese Brazil 0x0416 pt-BR 
Portuguese Cabo Verde 0x1000 pt-CV Portuguese Equatorial Guinea 0x1000 pt-GQ Portuguese Guinea-Bissau 0x1000 pt-GW Portuguese Luxembourg 0x1000 pt-LU Portuguese Macao SAR 0x1000 pt-MO Portuguese Mozambique 0x1000 pt-MZ Portuguese Portugal 0x0816 pt-PT Portuguese São Tomé and Príncipe 0x1000 pt-ST Portuguese Switzerland 0x1000 pt-CH Portuguese Timor-Leste 0x1000 pt-TL Prussian 0x1000 prg-001 Pseudo Language Pseudo locale for east Asian/complex script localization testing 0x05FE qps-ploca Pseudo Language Pseudo locale used for localization testing 0x0501 qps-ploc Pseudo Language Pseudo locale used for localization testing of mirrored locales 0x09FF qps-plocm Punjabi 0x0046 pa Punjabi 0x7C46 pa-Arab Punjabi India 0x0446 pa-IN Punjabi Islamic Republic of Pakistan 0x0846 pa-Arab-PK Quechua 0x006B quz Quechua Bolivia 0x046B quz-BO Quechua Ecuador 0x086B quz-EC Quechua Peru 0x0C6B quz-PE Ripuarian 0x1000 ksh Ripuarian Germany 0x1000 ksh-DE Romanian 0x0018 ro Romanian Moldova 0x0818 ro-MD Romanian Romania 0x0418 ro-RO Romansh 0x0017 rm Romansh Switzerland 0x0417 rm-CH Rombo 0x1000 rof Rombo Tanzania 0x1000 rof-TZ Rundi 0x1000 rn Rundi Burundi 0x1000 rn-BI Russian 0x0019 ru Russian Belarus 0x1000 ru-BY Russian Kazakhstan 0x1000 ru-KZ Russian Kyrgyzstan 0x1000 ru-KG Russian Moldova 0x0819 ru-MD Russian Russia 0x0419 ru-RU Russian Ukraine 0x1000 ru-UA Rwa 0x1000 rwk Rwa Tanzania 0x1000 rwk-TZ Saho 0x1000 ssy Saho Eritrea 0x1000 ssy-ER Sakha 0x0085 sah Sakha Russia 0x0485 sah-RU Samburu 0x1000 saq Samburu Kenya 0x1000 saq-KE Sami (Inari) 0x703B smn Sami (Inari) Finland 0x243B smn-FI Sami (Lule) 0x7C3B smj Sami (Lule) Norway 0x103B smj-NO Sami (Lule) Sweden 0x143B smj-SE Sami (Northern) 0x003B se Sami (Northern) Finland 0x0C3B se-FI Sami (Northern) Norway 0x043B se-NO Sami (Northern) Sweden 0x083B se-SE Sami (Skolt) 0x743B sms Sami (Skolt) Finland 0x203B sms-FI Sami (Southern) 0x783B sma Sami (Southern) Norway 0x183B sma-NO Sami (Southern) Sweden 0x1C3B sma-SE Sango 0x1000 sg 
Sango Central African Republic 0x1000 sg-CF Sangu 0x1000 sbp Sangu Tanzania 0x1000 sbp-TZ Sanskrit 0x004F sa Sanskrit India 0x044F sa-IN Scottish Gaelic 0x0091 gd Scottish Gaelic United Kingdom 0x0491 gd-GB Sena 0x1000 seh Sena Mozambique 0x1000 seh-MZ Serbian (Cyrillic) 0x6C1A sr-Cyrl Serbian (Cyrillic) Bosnia and Herzegovina 0x1C1A sr-Cyrl-BA Serbian (Cyrillic) Montenegro 0x301A sr-Cyrl-ME Serbian (Cyrillic) Serbia 0x281A sr-Cyrl-RS Serbian (Cyrillic) Serbia and Montenegro (Former) 0x0C1A sr-Cyrl-CS Serbian (Latin) 0x701A sr-Latn Serbian (Latin) 0x7C1A sr Serbian (Latin) Bosnia and Herzegovina 0x181A sr-Latn-BA Serbian (Latin) Montenegro 0x2c1A sr-Latn-ME Serbian (Latin) Serbia 0x241A sr-Latn-RS Serbian (Latin) Serbia and Montenegro (Former) 0x081A sr-Latn-CS Sesotho sa Leboa 0x006C nso Sesotho sa Leboa South Africa 0x046C nso-ZA Setswana 0x0032 tn Setswana Botswana 0x0832 tn-BW Setswana South Africa 0x0432 tn-ZA Shambala 0x1000 ksb Shambala Tanzania 0x1000 ksb-TZ Shona 0x1000 sn Shona Latin 0x1000 sn-Latn Shona Zimbabwe 0x1000 sn-Latn-ZW Sindhi 0x0059 sd Sindhi 0x7C59 sd-Arab Sindhi Islamic Republic of Pakistan 0x0859 sd-Arab-PK Sinhala 0x005B si Sinhala Sri Lanka 0x045B si-LK Slovak 0x001B sk Slovak Slovakia 0x041B sk-SK Slovenian 0x0024 sl Slovenian Slovenia 0x0424 sl-SI Soga 0x1000 xog Soga Uganda 0x1000 xog-UG Somali 0x0077 so Somali Djibouti 0x1000 so-DJ Somali Ethiopia 0x1000 so-ET Somali Kenya 0x1000 so-KE Somali Somalia 0x0477 so-SO Sotho 0x0030 st Sotho South Africa 0x0430 st-ZA South Ndebele 0x1000 nr South Ndebele South Africa 0x1000 nr-ZA Southern Sotho Lesotho 0x1000 st-LS Spanish 0x000A es Spanish Argentina 0x2C0A es-AR Spanish Belize 0x1000 es-BZ Spanish Bolivarian Republic of Venezuela 0x200A es-VE Spanish Bolivia 0x400A es-BO Spanish Brazil 0x1000 es-BR Spanish Chile 0x340A es-CL Spanish Colombia 0x240A es-CO Spanish Costa Rica 0x140A es-CR Spanish Cuba 0x5c0A es-CU Spanish Dominican Republic 0x1c0A es-DO Spanish Ecuador 0x300A es-EC Spanish El 
Salvador 0x440A es-SV Spanish Equatorial Guinea 0x1000 es-GQ Spanish Guatemala 0x100A es-GT Spanish Honduras 0x480A es-HN Spanish Latin America 0x580A es-419 Spanish Mexico 0x080A es-MX Spanish Nicaragua 0x4C0A es-NI Spanish Panama 0x180A es-PA Spanish Paraguay 0x3C0A es-PY Spanish Peru 0x280A es-PE Spanish Philippines 0x1000 es-PH Spanish Puerto Rico 0x500A es-PR Spanish Spain 0x040A es-ES_tradnl Spanish Spain 0x0c0A es-ES Spanish United States 0x540A es-US Spanish Uruguay 0x380A es-UY Standard Moroccan Tamazight 0x1000 zgh Standard Moroccan Tamazight Morocco 0x1000 zgh-Tfng-MA Standard Moroccan Tamazight Tifinagh 0x1000 zgh-Tfng Swati 0x1000 ss Swati South Africa 0x1000 ss-ZA Swati Swaziland 0x1000 ss-SZ Swedish 0x001D sv Swedish Åland Islands 0x1000 sv-AX Swedish Finland 0x081D sv-FI Swedish Sweden 0x041D sv-SE Syriac 0x005A syr Syriac Syria 0x045A syr-SY Tachelhit 0x1000 shi Tachelhit Tifinagh 0x1000 shi-Tfng Tachelhit Tifinagh, Morocco 0x1000 shi-Tfng-MA Tachelhit (Latin) 0x1000 shi-Latn Tachelhit (Latin) Morocco 0x1000 shi-Latn-MA Taita 0x1000 dav Taita Kenya 0x1000 dav-KE Tajik (Cyrillic) 0x0028 tg Tajik (Cyrillic) 0x7C28 tg-Cyrl Tajik (Cyrillic) Tajikistan 0x0428 tg-Cyrl-TJ Tamazight (Latin) 0x005F tzm Tamazight (Latin) 0x7C5F tzm-Latn Tamazight (Latin) Algeria 0x085F tzm-Latn-DZ Tamil 0x0049 ta Tamil India 0x0449 ta-IN Tamil Malaysia 0x1000 ta-MY Tamil Singapore 0x1000 ta-SG Tamil Sri Lanka 0x0849 ta-LK Tasawaq 0x1000 twq Tasawaq Niger 0x1000 twq-NE Tatar 0x0044 tt Tatar Russia 0x0444 tt-RU Telugu 0x004A te Telugu India 0x044A te-IN Teso 0x1000 teo Teso Kenya 0x1000 teo-KE Teso Uganda 0x1000 teo-UG Thai 0x001E th Thai Thailand 0x041E th-TH Tibetan 0x0051 bo Tibetan India 0x1000 bo-IN Tibetan People's Republic of China 0x0451 bo-CN Tigre 0x1000 tig Tigre Eritrea 0x1000 tig-ER Tigrinya 0x0073 ti Tigrinya Eritrea 0x0873 ti-ER Tigrinya Ethiopia 0x0473 ti-ET Tongan 0x1000 to Tongan Tonga 0x1000 to-TO Tsonga 0x0031 ts Tsonga South Africa 0x0431 ts-ZA Turkish 
0x001F tr Turkish Cyprus 0x1000 tr-CY Turkish Turkey 0x041F tr-TR Turkmen 0x0042 tk Turkmen Turkmenistan 0x0442 tk-TM Ukrainian 0x0022 uk Ukrainian Ukraine 0x0422 uk-UA Upper Sorbian 0x002E hsb Upper Sorbian Germany 0x042E hsb-DE Urdu 0x0020 ur Urdu India 0x0820 ur-IN Urdu Islamic Republic of Pakistan 0x0420 ur-PK Uyghur 0x0080 ug Uyghur People's Republic of China 0x0480 ug-CN Uzbek Perso-Arabic 0x1000 uz-Arab Uzbek Perso-Arabic, Afghanistan 0x1000 uz-Arab-AF Uzbek (Cyrillic) 0x7843 uz-Cyrl Uzbek (Cyrillic) Uzbekistan 0x0843 uz-Cyrl-UZ Uzbek (Latin) 0x0043 uz Uzbek (Latin) 0x7C43 uz-Latn Uzbek (Latin) Uzbekistan 0x0443 uz-Latn-UZ Vai 0x1000 vai Vai 0x1000 vai-Vaii Vai Liberia 0x1000 vai-Vaii-LR Vai (Latin) Liberia 0x1000 vai-Latn-LR Vai (Latin) 0x1000 vai-Latn Valencian Spain 0x0803 ca-ESvalencia Venda 0x0033 ve Venda South Africa 0x0433 ve-ZA Vietnamese 0x002A vi Vietnamese Vietnam 0x042A vi-VN Volapük 0x1000 vo Volapük World 0x1000 vo-001 Vunjo 0x1000 vun Vunjo Tanzania 0x1000 vun-TZ Walser 0x1000 wae Walser Switzerland 0x1000 wae-CH Welsh 0x0052 cy Welsh United Kingdom 0x0452 cy-GB Wolaytta 0x1000 wal Wolaytta Ethiopia 0x1000 wal-ET Wolof 0x0088 wo Wolof Senegal 0x0488 wo-SN Xhosa 0x0034 xh Xhosa South Africa 0x0434 xh-ZA Yangben 0x1000 yav Yangben Cameroon 0x1000 yav-CM Yi 0x0078 ii Yi People's Republic of China 0x0478 ii-CN Yiddish World 0x043D yi-001 Yoruba 0x006A yo Yoruba Benin 0x1000 yo-BJ Yoruba Nigeria 0x046A yo-NG Zarma 0x1000 dje Zarma Niger 0x1000 dje-NE Zulu 0x0035 zu Zulu South Africa 0x0435 zu-ZA ================================================ FILE: section.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" "math" "sort" "strings" ) const ( // ImageSectionReserved1 for future use. 
ImageSectionReserved1 = 0x00000000 // ImageSectionReserved2 for future use. ImageSectionReserved2 = 0x00000001 // ImageSectionReserved3 for future use. ImageSectionReserved3 = 0x00000002 // ImageSectionReserved4 for future use. ImageSectionReserved4 = 0x00000004 // ImageSectionTypeNoPad indicates the section should not be padded to the next // boundary. This flag is obsolete and is replaced by ImageSectionAlign1Bytes. // This is valid only for object files. ImageSectionTypeNoPad = 0x00000008 // ImageSectionReserved5 for future use. ImageSectionReserved5 = 0x00000010 // ImageSectionCntCode indicates the section contains executable code. ImageSectionCntCode = 0x00000020 // ImageSectionCntInitializedData indicates the section contains initialized // data. ImageSectionCntInitializedData = 0x00000040 // ImageSectionCntUninitializedData indicates the section contains uninitialized // data. ImageSectionCntUninitializedData = 0x00000080 // ImageSectionLnkOther is reserved for future use. ImageSectionLnkOther = 0x00000100 // ImageSectionLnkInfo indicates the section contains comments or other // information. The .drectve section has this type. This is valid for // object files only. ImageSectionLnkInfo = 0x00000200 // ImageSectionReserved6 for future use. ImageSectionReserved6 = 0x00000400 // ImageSectionLnkRemove indicates the section will not become part of the image // This is valid only for object files. ImageSectionLnkRemove = 0x00000800 // ImageSectionLnkComdat indicates the section contains COMDAT data. For more // information, see COMDAT Sections (Object Only). This is valid only for // object files. ImageSectionLnkCOMDAT = 0x00001000 // ImageSectionGpRel indicates the section contains data referenced through the // global pointer (GP). ImageSectionGpRel = 0x00008000 // ImageSectionMemPurgeable is reserved for future use. ImageSectionMemPurgeable = 0x00020000 // ImageSectionMem16Bit is reserved for future use. 
ImageSectionMem16Bit = 0x00020000 // ImageSectionMemLocked is reserved for future use. ImageSectionMemLocked = 0x00040000 // ImageSectionMemPreload is reserved for future use. ImageSectionMemPreload = 0x00080000 // ImageSectionAlign1Bytes indicates to align data on a 1-byte boundary. // Valid only for object files. ImageSectionAlign1Bytes = 0x00100000 // ImageSectionAlign2Bytes indicates to align data on a 2-byte boundary. // Valid only for object files. ImageSectionAlign2Bytes = 0x00200000 // ImageSectionAlign4Bytes indicates to align data on a 4-byte boundary. // Valid only for object files. ImageSectionAlign4Bytes = 0x00300000 // ImageSectionAlign8Bytes indicates to align data on a 8-byte boundary. // Valid only for object files. ImageSectionAlign8Bytes = 0x00400000 // ImageSectionAlign16Bytes indicates to align data on a 16-byte boundary. // Valid only for object files. ImageSectionAlign16Bytes = 0x00500000 // ImageSectionAlign32Bytes indicates to align data on a 32-byte boundary. // Valid only for object files. ImageSectionAlign32Bytes = 0x00600000 // ImageSectionAlign64Bytes indicates to align data on a 64-byte boundary. // Valid only for object files. ImageSectionAlign64Bytes = 0x00700000 // ImageSectionAlign128Bytes indicates to align data on a 128-byte boundary. // Valid only for object files. ImageSectionAlign128Bytes = 0x00800000 // ImageSectionAlign256Bytes indicates to align data on a 256-byte boundary. // Valid only for object files. ImageSectionAlign256Bytes = 0x00900000 // ImageSectionAlign512Bytes indicates to align data on a 512-byte boundary. // Valid only for object files. ImageSectionAlign512Bytes = 0x00A00000 // ImageSectionAlign1024Bytes indicates to align data on a 1024-byte boundary. // Valid only for object files. ImageSectionAlign1024Bytes = 0x00B00000 // ImageSectionAlign2048Bytes indicates to align data on a 2048-byte boundary. // Valid only for object files. 
ImageSectionAlign2048Bytes = 0x00C00000 // ImageSectionAlign4096Bytes indicates to align data on a 4096-byte boundary. // Valid only for object files. ImageSectionAlign4096Bytes = 0x00D00000 // ImageSectionAlign8192Bytes indicates to align data on a 8192-byte boundary. // Valid only for object files. ImageSectionAlign8192Bytes = 0x00E00000 // ImageSectionLnkNRelocOvfl indicates the section contains extended // relocations. ImageSectionLnkNRelocOvfl = 0x01000000 // ImageSectionMemDiscardable indicates the section can be discarded as needed. ImageSectionMemDiscardable = 0x02000000 // ImageSectionMemNotCached indicates the section cannot be cached. ImageSectionMemNotCached = 0x04000000 // ImageSectionMemNotPaged indicates the section is not pageable. ImageSectionMemNotPaged = 0x08000000 // ImageSectionMemShared indicates the section can be shared in memory. ImageSectionMemShared = 0x10000000 // ImageSectionMemExecute indicates the section can be executed as code. ImageSectionMemExecute = 0x20000000 // ImageSectionMemRead indicates the section can be read. ImageSectionMemRead = 0x40000000 // ImageSectionMemWrite indicates the section can be written to. ImageSectionMemWrite = 0x80000000 ) // ImageSectionHeader is part of the section table , in fact section table is an // array of Image Section Header each contains information about one section of // the whole file such as attribute,virtual offset. the array size is the number // of sections in the file. // Binary Spec : each struct is 40 byte and there is no padding . type ImageSectionHeader struct { // An 8-byte, null-padded UTF-8 encoded string. If the string is exactly 8 // characters long, there is no terminating null. For longer names, this // field contains a slash (/) that is followed by an ASCII representation of // a decimal number that is an offset into the string table. Executable // images do not use a string table and do not support section names longer // than 8 characters. 
Long names in object files are truncated if they are // emitted to an executable file. Name [8]uint8 `json:"name"` // The total size of the section when loaded into memory. If this value is // greater than SizeOfRawData, the section is zero-padded. This field is // valid only for executable images and should be set to zero for object files. VirtualSize uint32 `json:"virtual_size"` // For executable images, the address of the first byte of the section // relative to the image base when the section is loaded into memory. // For object files, this field is the address of the first byte before // relocation is applied; for simplicity, compilers should set this to zero. // Otherwise, it is an arbitrary value that is subtracted from offsets during // relocation. VirtualAddress uint32 `json:"virtual_address"` // The size of the section (for object files) or the size of the initialized // data on disk (for image files). For executable images, this must be a // multiple of FileAlignment from the optional header. If this is less than // VirtualSize, the remainder of the section is zero-filled. Because the // SizeOfRawData field is rounded but the VirtualSize field is not, it is // possible for SizeOfRawData to be greater than VirtualSize as well. When // a section contains only uninitialized data, this field should be zero. SizeOfRawData uint32 `json:"size_of_raw_data"` // The file pointer to the first page of the section within the COFF file. // For executable images, this must be a multiple of FileAlignment from the // optional header. For object files, the value should be aligned on a // 4-byte boundary for best performance. When a section contains only // uninitialized data, this field should be zero. PointerToRawData uint32 `json:"pointer_to_raw_data"` // The file pointer to the beginning of relocation entries for the section. // This is set to zero for executable images or if there are no relocations. 
PointerToRelocations uint32 `json:"pointer_to_relocations"` // The file pointer to the beginning of line-number entries for the section. // This is set to zero if there are no COFF line numbers. This value should // be zero for an image because COFF debugging information is deprecated. PointerToLineNumbers uint32 `json:"pointer_to_line_numbers"` // The number of relocation entries for the section. // This is set to zero for executable images. NumberOfRelocations uint16 `json:"number_of_relocations"` // The number of line-number entries for the section. This value should be // zero for an image because COFF debugging information is deprecated. NumberOfLineNumbers uint16 `json:"number_of_line_numbers"` // The flags that describe the characteristics of the section. Characteristics uint32 `json:"characteristics"` } // Section represents a PE section header, plus additional data like entropy. type Section struct { Header ImageSectionHeader `json:"header"` // Entropy represents the section entropy. This field is not always populated // depending on weather entropy calculation is enabled. The reason behind // using a float64 pointer instead of a float64 is to distinguish between // the case when the section entropy is equal to zero and the case when the // entropy is equal to nil - meaning that it was never calculated. Entropy *float64 `json:"entropy,omitempty"` } // ParseSectionHeader parses the PE section headers. Each row of the section // table is, in effect, a section header. It must immediately follow the PE // header. func (pe *File) ParseSectionHeader() (err error) { // Get the first section offset. optionalHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + 4 + uint32(binary.Size(pe.NtHeader.FileHeader)) offset := optionalHeaderOffset + uint32(pe.NtHeader.FileHeader.SizeOfOptionalHeader) // Track invalid/suspicious values while parsing sections. 
maxErr := 3 secHeader := ImageSectionHeader{} numberOfSections := pe.NtHeader.FileHeader.NumberOfSections secHeaderSize := uint32(binary.Size(secHeader)) // The section header indexing in the table is one-based, with the order of // the sections defined by the linker. The sections follow one another // contiguously in the order defined by the section header table, with // starting RVAs aligned by the value of the SectionAlignment field of the // PE header. for i := uint16(0); i < numberOfSections; i++ { err := pe.structUnpack(&secHeader, offset, secHeaderSize) if err != nil { return err } if secEnd := int64(secHeader.PointerToRawData) + int64(secHeader.SizeOfRawData); secEnd > pe.OverlayOffset { pe.OverlayOffset = secEnd } countErr := 0 sec := Section{Header: secHeader} secName := sec.String() if (ImageSectionHeader{}) == secHeader { pe.Anomalies = append(pe.Anomalies, "Section `"+secName+"` Contents are null-bytes") countErr++ } if secHeader.SizeOfRawData+secHeader.PointerToRawData > pe.size { pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` SizeOfRawData is larger than file") countErr++ } if pe.adjustFileAlignment(secHeader.PointerToRawData) > pe.size { pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` PointerToRawData points beyond the end of the file") countErr++ } if secHeader.VirtualSize > 0x10000000 { pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` VirtualSize is extremely large > 256MiB") countErr++ } if pe.adjustSectionAlignment(secHeader.VirtualAddress) > 0x10000000 { pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` VirtualAddress is beyond 0x10000000") countErr++ } var fileAlignment uint32 switch pe.Is64 { case true: fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).FileAlignment case false: fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).FileAlignment } if fileAlignment != 0 && secHeader.PointerToRawData%fileAlignment != 0 { pe.Anomalies = append(pe.Anomalies, "Section 
`"+secName+ "` PointerToRawData is not multiple of FileAlignment") countErr++ } if countErr >= maxErr { break } // Append to the list of sections. if pe.opts.SectionEntropy { entropy := sec.CalculateEntropy(pe) sec.Entropy = &entropy } pe.Sections = append(pe.Sections, sec) offset += secHeaderSize } // Sort the sections by their VirtualAddress. This will allow to check // for potentially overlapping sections in badly constructed PEs. sort.Sort(byVirtualAddress(pe.Sections)) if pe.NtHeader.FileHeader.NumberOfSections > 0 && len(pe.Sections) > 0 { offset += secHeaderSize * uint32(pe.NtHeader.FileHeader.NumberOfSections) } // There could be a problem if there are no raw data sections // greater than 0. Example: fc91013eb72529da005110a3403541b6 // Should this throw an exception in the minimum header offset // can't be found? var rawDataPointers []uint32 for _, sec := range pe.Sections { if sec.Header.PointerToRawData > 0 { rawDataPointers = append( rawDataPointers, pe.adjustFileAlignment( sec.Header.PointerToRawData)) } } var lowestSectionOffset uint32 if len(rawDataPointers) > 0 { lowestSectionOffset = Min(rawDataPointers) } else { lowestSectionOffset = 0 } if lowestSectionOffset == 0 || lowestSectionOffset < offset { if offset <= pe.size { pe.Header = pe.data[:offset] } } else { if lowestSectionOffset <= pe.size { pe.Header = pe.data[:lowestSectionOffset] } } pe.HasSections = true return nil } // String stringifies the section name. func (section *Section) String() string { return strings.Replace(string(section.Header.Name[:]), "\x00", "", -1) } // NextHeaderAddr returns the VirtualAddress of the next section. func (section *Section) NextHeaderAddr(pe *File) uint32 { for i, currentSection := range pe.Sections { if i == len(pe.Sections)-1 { return 0 } if section.Header == currentSection.Header { return pe.Sections[i+1].Header.VirtualAddress } } return 0 } // Contains checks whether the section contains a given RVA. 
func (section *Section) Contains(rva uint32, pe *File) bool { // Check if the SizeOfRawData is realistic. If it's bigger than the size of // the whole PE file minus the start address of the section it could be // either truncated or the SizeOfRawData contains a misleading value. // In either of those cases we take the VirtualSize. var size uint32 adjustedPointer := pe.adjustFileAlignment(section.Header.PointerToRawData) if uint32(len(pe.data))-adjustedPointer < section.Header.SizeOfRawData { size = section.Header.VirtualSize } else { size = Max(section.Header.SizeOfRawData, section.Header.VirtualSize) } vaAdj := pe.adjustSectionAlignment(section.Header.VirtualAddress) // Check whether there's any section after the current one that starts before // the calculated end for the current one. If so, cut the current section's // size to fit in the range up to where the next section starts. if section.NextHeaderAddr(pe) != 0 && section.NextHeaderAddr(pe) > section.Header.VirtualAddress && vaAdj+size > section.NextHeaderAddr(pe) { size = section.NextHeaderAddr(pe) - vaAdj } return vaAdj <= rva && rva < vaAdj+size } // Data returns a data chunk from a section. 
func (section *Section) Data(start, length uint32, pe *File) []byte {
	// Map the section's raw file offset and RVA through the PE alignment
	// helpers so reads line up with how the loader would view the section.
	pointerToRawDataAdj := pe.adjustFileAlignment(
		section.Header.PointerToRawData)
	virtualAddressAdj := pe.adjustSectionAlignment(
		section.Header.VirtualAddress)

	// `start` is an RVA; translate it to a file offset inside the section.
	// A start of zero means "from the beginning of the section".
	var offset uint32
	if start == 0 {
		offset = pointerToRawDataAdj
	} else {
		offset = (start - virtualAddressAdj) + pointerToRawDataAdj
	}
	if offset > pe.size {
		return nil
	}

	// A length of zero means "up to the end of the section's raw data".
	var end uint32
	if length != 0 {
		end = offset + length
	} else {
		end = offset + section.Header.SizeOfRawData
	}

	// PointerToRawData is not adjusted here as we might want to read any possible
	// extra bytes that might get cut off by aligning the start (and hence cutting
	// something off the end)
	if end > section.Header.PointerToRawData+section.Header.SizeOfRawData &&
		section.Header.PointerToRawData+section.Header.SizeOfRawData > offset {
		end = section.Header.PointerToRawData + section.Header.SizeOfRawData
	}

	// Never read past the end of the mapped file data.
	if end > pe.size {
		end = pe.size
	}

	return pe.data[offset:end]
}

// CalculateEntropy calculates section entropy.
// It computes the Shannon entropy H = -Σ p(b)·log2(p(b)) over the byte-value
// histogram of the section's raw data, returning 0.0 for empty or unreadable
// sections. The result is in bits per byte, in the range [0, 8].
func (section *Section) CalculateEntropy(pe *File) float64 {
	sectionData := section.Data(0, 0, pe)
	if sectionData == nil {
		return 0.0
	}

	sectionSize := float64(len(sectionData))
	if sectionSize == 0.0 {
		return 0.0
	}

	// Count occurrences of each possible byte value.
	var frequencies [256]uint64
	for _, v := range sectionData {
		frequencies[v]++
	}

	// Accumulate Σ p·log2(p) over byte values that actually occur; the sign
	// is flipped on return since each log2 term is non-positive.
	var entropy float64
	for _, p := range frequencies {
		if p > 0 {
			freq := float64(p) / sectionSize
			entropy += freq * math.Log2(freq)
		}
	}

	return -entropy
}

// byVirtualAddress sorts all sections by Virtual Address.
type byVirtualAddress []Section

func (s byVirtualAddress) Len() int      { return len(s) }
func (s byVirtualAddress) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byVirtualAddress) Less(i, j int) bool {
	return s[i].Header.VirtualAddress < s[j].Header.VirtualAddress
}

// byPointerToRawData sorts all sections by PointerToRawData.
type byPointerToRawData []Section

func (s byPointerToRawData) Len() int      { return len(s) }
func (s byPointerToRawData) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byPointerToRawData) Less(i, j int) bool {
	return s[i].Header.PointerToRawData < s[j].Header.PointerToRawData
}

// PrettySectionFlags returns the string representations of the `Flags` field
// of section header.
func (section *Section) PrettySectionFlags() []string {
	var values []string

	// Lookup table mapping each characteristics bit to a human-readable name.
	// ImageSectionReserved1 (0x0) is deliberately left commented out: ANDing
	// against zero matches every section, so it would always be reported.
	sectionFlags := map[uint32]string{
		//ImageSectionReserved1: "Reserved1",
		ImageSectionReserved2:            "Reserved2",
		ImageSectionReserved3:            "Reserved3",
		ImageSectionReserved4:            "Reserved4",
		ImageSectionTypeNoPad:            "No Padd",
		ImageSectionReserved5:            "Reserved5",
		ImageSectionCntCode:              "Contains Code",
		ImageSectionCntInitializedData:   "Initialized Data",
		ImageSectionCntUninitializedData: "Uninitialized Data",
		ImageSectionLnkOther:             "Lnk Other",
		ImageSectionLnkInfo:              "Lnk Info",
		ImageSectionReserved6:            "Reserved6",
		ImageSectionLnkRemove:            "LnkRemove",
		ImageSectionLnkCOMDAT:            "LnkCOMDAT",
		ImageSectionGpRel:                "GpReferenced",
		ImageSectionMemPurgeable:         "Purgeable",
		ImageSectionMemLocked:            "Locked",
		ImageSectionMemPreload:           "Preload",
		ImageSectionAlign1Bytes:          "Align1Bytes",
		ImageSectionAlign2Bytes:          "Align2Bytes",
		ImageSectionAlign4Bytes:          "Align4Bytes",
		ImageSectionAlign8Bytes:          "Align8Bytes",
		ImageSectionAlign16Bytes:         "Align16Bytes",
		ImageSectionAlign32Bytes:         "Align32Bytes",
		ImageSectionAlign64Bytes:         "Align64Bytes",
		ImageSectionAlign128Bytes:        "Align128Bytes",
		ImageSectionAlign256Bytes:        "Align256Bytes",
		ImageSectionAlign512Bytes:        "Align512Bytes",
		ImageSectionAlign1024Bytes:       "Align1024Bytes",
		ImageSectionAlign2048Bytes:       "Align2048Bytes",
		ImageSectionAlign4096Bytes:       "Align4096Bytes",
		ImageSectionAlign8192Bytes:       "Align8192Bytes",
		ImageSectionLnkNRelocOvfl:        "ExtendedReloc",
		ImageSectionMemDiscardable:       "Discardable",
		ImageSectionMemNotCached:         "NotCached",
		ImageSectionMemNotPaged:          "NotPaged",
		ImageSectionMemShared:            "Shared",
		ImageSectionMemExecute:
"Executable", ImageSectionMemRead: "Readable", ImageSectionMemWrite: "Writable", } flags := section.Header.Characteristics for k, v := range sectionFlags { if (k & flags) == k { values = append(values, v) } } return values } ================================================ FILE: section_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "reflect" "sort" "testing" ) type TestSection struct { sectionCount int sectionIndex int sectionName string header ImageSectionHeader sectionFlags []string entropy float64 } func TestParseSectionHeaders(t *testing.T) { tests := []struct { in string out TestSection }{ {getAbsoluteFilePath("test/putty.exe"), TestSection{ sectionCount: 8, sectionIndex: 3, sectionName: ".pdata", header: ImageSectionHeader{ Name: [8]uint8{0x2e, 0x70, 0x64, 0x61, 0x74, 0x61, 0x0, 0x0}, VirtualSize: 0x588c, VirtualAddress: 0xd2000, SizeOfRawData: 0x5a00, PointerToRawData: 0xc9c00, Characteristics: 0x40000040, }, sectionFlags: []string{"Initialized Data", "Readable"}, entropy: 5.789589357441211, }}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } sections := file.Sections if len(sections) != tt.out.sectionCount { t.Errorf("sections count assertion failed, got %v, want %v", len(sections), tt.out.sectionCount) } section := sections[tt.out.sectionIndex] if !reflect.DeepEqual(section.Header, tt.out.header) { t.Errorf("section header assertion failed, got %v, want %v", section.Header, tt.out.header) } sectionName := sections[tt.out.sectionIndex].String() if sectionName != tt.out.sectionName { t.Errorf("section name assertion failed, got %v, want %v", sectionName, 
tt.out.sectionName) } prettySectionFlags := section.PrettySectionFlags() sort.Strings(prettySectionFlags) if !reflect.DeepEqual(prettySectionFlags, tt.out.sectionFlags) { t.Errorf("pretty section flags assertion failed, got %v, want %v", prettySectionFlags, tt.out.sectionFlags) } entropy := sections[tt.out.sectionIndex].CalculateEntropy(file) if entropy != tt.out.entropy { t.Errorf("entropy calculation failed, got %v, want %v", entropy, tt.out.entropy) } }) } } ================================================ FILE: security.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "bytes" "crypto" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "encoding/binary" "encoding/hex" "errors" "fmt" "hash" "io" "os" "os/exec" "path/filepath" "runtime" "sort" "strings" "time" "github.com/ayoubfaouzi/pkcs7" ) // The options for the WIN_CERTIFICATE Revision member include // (but are not limited to) the following. const ( // WinCertRevision1_0 represents the WIN_CERT_REVISION_1_0 Version 1, // legacy version of the Win_Certificate structure. // It is supported only for purposes of verifying legacy Authenticode // signatures WinCertRevision1_0 = 0x0100 // WinCertRevision2_0 represents the WIN_CERT_REVISION_2_0. Version 2 // is the current version of the Win_Certificate structure. WinCertRevision2_0 = 0x0200 ) // The options for the WIN_CERTIFICATE CertificateType member include // (but are not limited to) the items in the following table. Note that some // values are not currently supported. const ( // Certificate contains an X.509 Certificate (Not Supported) WinCertTypeX509 = 0x0001 // Certificate contains a PKCS#7 SignedData structure. WinCertTypePKCSSignedData = 0x0002 // Reserved. WinCertTypeReserved1 = 0x0003 // Terminal Server Protocol Stack Certificate signing (Not Supported). 
	WinCertTypeTSStackSigned = 0x0004
)

var (
	// ErrSecurityDataDirInvalid is reported when the certificate
	// header in the security directory is invalid.
	ErrSecurityDataDirInvalid = errors.New(
		`invalid certificate header in security directory`)
)

// CertificateSection represents the security directory of a PE file, which
// contains Authenticode signatures used to verify the integrity and origin of
// the binary. The raw PKCS#7 data is parsed into one or more certificates,
// including any nested counter-signatures.
type CertificateSection struct {
	// Header is the WIN_CERTIFICATE structure at the start of the security
	// directory, specifying the length, revision, and type of the certificate.
	Header WinCertificate `json:"header"`

	// Raw contains the raw PKCS#7 signed data bytes (excluding the
	// WIN_CERTIFICATE header). This field is excluded from JSON output.
	Raw []byte `json:"-"`

	// Certificates holds the parsed certificate chain. The first entry is the
	// primary Authenticode signature; subsequent entries are nested
	// counter-signatures extracted from unsigned PKCS#7 attributes.
	Certificates []Certificate `json:"certificates,omitempty"`
}

// Certificate represents a parsed Authenticode signature extracted from the
// PE security directory. It pairs the full PKCS#7 content with validation
// results: whether the signature hash matches the PE image (SignatureValid)
// and whether the signing certificate chains to a trusted root (Verified).
type Certificate struct {
	// Content is the full parsed PKCS#7 structure. Excluded from JSON output.
	Content pkcs7.PKCS7 `json:"-"`

	// SignatureContent holds the parsed Authenticode digest algorithm and hash
	// from the SpcIndirectDataContent. Excluded from JSON output.
	SignatureContent AuthenticodeContent `json:"-"`

	// SignatureValid is true when the Authenticode hash in the signature
	// matches the computed authentihash of the PE image.
	SignatureValid bool `json:"signature_valid"`

	// Info contains the human-readable certificate metadata (issuer, subject,
	// validity period, serial number, and algorithms).
	Info CertInfo `json:"info"`

	// Verified is true when the signing certificate chains to a trusted root
	// in the system certificate store.
	Verified bool `json:"verified"`
}

// WinCertificate encapsulates a signature used in verifying executable files.
type WinCertificate struct {
	// Specifies the length, in bytes, of the signature.
	Length uint32 `json:"length"`

	// Specifies the certificate revision.
	Revision uint16 `json:"revision"`

	// Specifies the type of certificate.
	CertificateType uint16 `json:"certificate_type"`
}

// CertInfo wraps the important fields of the pkcs7 structure.
// This is what we what keep in JSON marshalling.
type CertInfo struct {
	// The certificate authority (CA) that charges customers to issue
	// certificates for them.
	Issuer string `json:"issuer"`

	// The subject of the certificate is the entity its public key is associated
	// with (i.e. the "owner" of the certificate).
	Subject string `json:"subject"`

	// The certificate won't be valid before this timestamp.
	NotBefore time.Time `json:"not_before"`

	// The certificate won't be valid after this timestamp.
	NotAfter time.Time `json:"not_after"`

	// The serial number MUST be a positive integer assigned by the CA to each
	// certificate. It MUST be unique for each certificate issued by a given CA
	// (i.e., the issuer name and serial number identify a unique certificate).
	// CAs MUST force the serialNumber to be a non-negative integer.
	// For convenience, we convert the big int to string.
	SerialNumber string `json:"serial_number"`

	// The identifier for the cryptographic algorithm used by the CA to sign
	// this certificate.
	SignatureAlgorithm x509.SignatureAlgorithm `json:"signature_algorithm"`

	// The Public Key Algorithm refers to the public key inside the certificate.
	// This certificate is used together with the matching private key to prove
	// the identity of the peer.
	PublicKeyAlgorithm x509.PublicKeyAlgorithm `json:"public_key_algorithm"`
}

// RelRange is a file region expressed as a start offset plus a byte length.
type RelRange struct {
	Start  uint32
	Length uint32
}

// byStart implements sort.Interface to order RelRanges by ascending Start.
type byStart []RelRange

func (s byStart) Len() int           { return len(s) }
func (s byStart) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byStart) Less(i, j int) bool { return s[i].Start < s[j].Start }

// Range is a file region expressed as absolute [Start, End) offsets.
type Range struct {
	Start uint32
	End   uint32
}

// parseLocations computes the file regions that the Authenticode hashing
// procedure must skip: the optional header CheckSum field ("checksum"), the
// Certificate Table entry of the Data Directory ("datadir_certtable"), and
// the Certificate Table itself ("certtable"). Entries are omitted (without
// error) when the headers cannot accommodate them; an error is returned only
// when the optional header itself is implausible.
func (pe *File) parseLocations() (map[string]*RelRange, error) {
	location := make(map[string]*RelRange, 3)

	fileHdrSize := uint32(binary.Size(pe.NtHeader.FileHeader))
	optionalHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + 4 + fileHdrSize

	var (
		oh32 ImageOptionalHeader32
		oh64 ImageOptionalHeader64

		optionalHeaderSize uint32
	)

	switch pe.Is64 {
	case true:
		oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
		optionalHeaderSize = oh64.SizeOfHeaders
	case false:
		oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
		optionalHeaderSize = oh32.SizeOfHeaders
	}

	if optionalHeaderSize > pe.size-optionalHeaderOffset {
		msgF := "the optional header exceeds the file length (%d + %d > %d)"
		return nil, fmt.Errorf(msgF, optionalHeaderSize, optionalHeaderOffset, pe.size)
	}

	// 68 bytes is the minimum needed to reach the CheckSum field (offset 64,
	// length 4) that authenticode hashing must exclude.
	if optionalHeaderSize < 68 {
		msgF := "the optional header size is %d < 68, which is insufficient for authenticode"
		return nil, fmt.Errorf(msgF, optionalHeaderSize)
	}

	// The location of the checksum
	location["checksum"] = &RelRange{optionalHeaderOffset + 64, 4}

	// rvaBase/certBase are the offsets of the NumberOfRvaAndSizes field and
	// the Certificate Table Data Directory entry; they differ between the
	// PE32 and PE32+ optional header layouts.
	var rvaBase, certBase, numberOfRvaAndSizes uint32
	switch pe.Is64 {
	case true:
		rvaBase = optionalHeaderOffset + 108
		certBase = optionalHeaderOffset + 144
		numberOfRvaAndSizes = oh64.NumberOfRvaAndSizes
	case false:
		rvaBase = optionalHeaderOffset + 92
		certBase = optionalHeaderOffset + 128
		numberOfRvaAndSizes = oh32.NumberOfRvaAndSizes
	}

	if optionalHeaderOffset+optionalHeaderSize < rvaBase+4 {
		pe.logger.Debug("The PE Optional Header size can not accommodate for the NumberOfRvaAndSizes field")
		return location, nil
	}

	// The Certificate Table is data directory index 4, so at least 5 entries
	// must be declared for it to exist.
	if numberOfRvaAndSizes < uint32(5) {
		pe.logger.Debugf("The PE Optional Header does not have a Certificate Table entry in its "+
			"Data Directory; NumberOfRvaAndSizes = %d", numberOfRvaAndSizes)
		return location, nil
	}

	if optionalHeaderOffset+optionalHeaderSize < certBase+8 {
		pe.logger.Debug("The PE Optional Header size can not accommodate for a Certificate Table" +
			"entry in its Data Directory")
		return location, nil
	}

	// The location of the entry of the Certificate Table in the Data Directory
	location["datadir_certtable"] = &RelRange{certBase, 8}

	var address, size uint32
	switch pe.Is64 {
	case true:
		dirEntry := oh64.DataDirectory[ImageDirectoryEntryCertificate]
		address = dirEntry.VirtualAddress
		size = dirEntry.Size
	case false:
		dirEntry := oh32.DataDirectory[ImageDirectoryEntryCertificate]
		address = dirEntry.VirtualAddress
		size = dirEntry.Size
	}

	if size == 0 {
		pe.logger.Debug("The Certificate Table is empty")
		return location, nil
	}

	// The comparison is done in int64 to avoid uint32 overflow on
	// address + size for hostile headers.
	if int64(address) < int64(optionalHeaderSize)+int64(optionalHeaderOffset) ||
		int64(address)+int64(size) > int64(pe.size) {
		pe.logger.Debugf("The location of the Certificate Table in the binary makes no sense and "+
			"is either beyond the boundaries of the file, or in the middle of the PE header; "+
			"VirtualAddress: %x, Size: %x", address, size)
		return location, nil
	}

	// The location of the Certificate Table
	location["certtable"] = &RelRange{address, size}
	return location, nil
}

// Authentihash generates the SHA256 pe image file hash.
// The relevant sections to exclude during hashing are:
//   - The location of the checksum
//   - The location of the entry of the Certificate Table in the Data Directory
//   - The location of the Certificate Table.
func (pe *File) Authentihash() []byte {
	// Returns the first (and only) digest produced by the generic helper.
	results := pe.AuthentihashExt(crypto.SHA256.New())
	if len(results) > 0 {
		return results[0]
	}
	return nil
}

// AuthentihashExt generates pe image file hashes using the given hashers.
// The relevant sections to exclude during hashing are: // - The location of the checksum // - The location of the entry of the Certificate Table in the Data Directory // - The location of the Certificate Table. func (pe *File) AuthentihashExt(hashers ...hash.Hash) [][]byte { locationMap, err := pe.parseLocations() if err != nil { return nil } locationSlice := make([]RelRange, 0, len(locationMap)) for k, v := range locationMap { if stringInSlice(k, []string{"checksum", "datadir_certtable", "certtable"}) { locationSlice = append(locationSlice, *v) } } sort.Sort(byStart(locationSlice)) ranges := make([]*Range, 0, len(locationSlice)) start := uint32(0) for _, r := range locationSlice { ranges = append(ranges, &Range{Start: start, End: r.Start}) start = r.Start + r.Length } ranges = append(ranges, &Range{Start: start, End: pe.size}) var rd io.ReaderAt if pe.f != nil { rd = pe.f } else { rd = bytes.NewReader(pe.data) } for _, v := range ranges { for _, hasher := range hashers { sr := io.NewSectionReader(rd, int64(v.Start), int64(v.End)-int64(v.Start)) io.Copy(hasher, sr) sr.Seek(0, io.SeekStart) } } var ret [][]byte for _, hasher := range hashers { ret = append(ret, hasher.Sum(nil)) } return ret } // The security directory contains the authenticode signature, which is a digital // signature format that is used, among other purposes, to determine the origin // and integrity of software binaries. Authenticode is based on the Public-Key // Cryptography Standards (PKCS) #7 standard and uses X.509 v3 certificates to // bind an Authenticode-signed file to the identity of a software publisher. // This data are not loaded into memory as part of the image file. 
// parseSecurityDirectory parses the WIN_CERTIFICATE header at the given file
// offset, extracts the primary PKCS#7 Authenticode signature and every nested
// counter-signature, and records the results (including hash-match and
// chain-of-trust verdicts) in pe.Certificates.
func (pe *File) parseSecurityDirectory(rva, size uint32) error {

	var certHeader WinCertificate
	certSize := uint32(binary.Size(certHeader))
	signatureContent := AuthenticodeContent{}

	// The virtual address value from the Certificate Table entry in the
	// Optional Header Data Directory is a file offset to the first attribute
	// certificate entry.
	fileOffset := rva

	err := pe.structUnpack(&certHeader, fileOffset, certSize)
	if err != nil {
		return ErrOutsideBoundary
	}

	// The declared certificate length must fit inside both the directory
	// entry and the file itself, and must at least cover its own header.
	// NOTE(review): fileOffset+certHeader.Length is uint32 arithmetic and
	// could wrap for hostile values — confirm upstream validation of rva.
	if certHeader.Length > size {
		return ErrOutsideBoundary
	}

	if fileOffset+certHeader.Length > pe.size {
		return ErrOutsideBoundary
	}

	if certHeader.Length < certSize {
		return ErrSecurityDataDirInvalid
	}

	pe.HasCertificate = true
	pe.Certificates.Header = certHeader
	// NOTE(review): slices pe.data directly — assumes the whole file is
	// resident in memory; confirm this also holds for the file-handle mode.
	pe.Certificates.Raw = pe.data[fileOffset+certSize : fileOffset+certHeader.Length]
	certContent := pe.Certificates.Raw

	// Walk the chain: the first iteration handles the primary signature,
	// subsequent iterations handle signatures nested in unsigned attributes.
	for {
		pkcs, err := pkcs7.Parse(certContent)
		if err != nil {
			return err
		}

		// The pkcs7.PKCS7 structure contains many fields that we are not
		// interested to, so create another structure, similar to _CERT_INFO
		// structure which contains only the important information.
		var signerCertificate = pkcs.GetOnlySigner()
		if signerCertificate == nil {
			return errors.New("could not find signer certificate")
		}

		var certInfo CertInfo
		certInfo.SerialNumber = hex.EncodeToString(signerCertificate.SerialNumber.Bytes())
		certInfo.PublicKeyAlgorithm = signerCertificate.PublicKeyAlgorithm
		certInfo.NotAfter = signerCertificate.NotAfter
		certInfo.NotBefore = signerCertificate.NotBefore

		// Issuer infos
		certInfo.Issuer = formatPkixName(signerCertificate.Issuer)

		// Subject infos
		certInfo.Subject = formatPkixName(signerCertificate.Subject)

		// Let's mark the file as signed, then we verify if the signature is valid.
		pe.IsSigned = true

		var certValid bool

		// Let's load the system root certs.
		if !pe.opts.DisableCertValidation {
			var certPool *x509.CertPool
			if runtime.GOOS == "windows" {
				certPool, err = loadSystemRoots()
			} else {
				certPool, err = x509.SystemCertPool()
			}

			// Verify the signature. This will also verify the chain of trust of the
			// the end-entity signer cert to one of the root in the trust store.
			if err != nil {
				pe.logger.Errorf("failed to loadSystemRoots: %v", err)
			} else {
				err = pkcs.VerifyWithChain(certPool)
				if err == nil {
					certValid = true
				} else {
					certValid = false
				}
			}
		}

		// Compare the digest embedded in the signature against the computed
		// authentihash of the image. Note that SignatureValid also requires
		// a valid chain of trust (certValid), by design of this check.
		var signatureValid bool
		signatureContent, err = parseAuthenticodeContent(pkcs.Content)
		if err != nil {
			pe.logger.Errorf("could not parse authenticode content: %v", err)
			signatureValid = false
		} else if !pe.opts.DisableSignatureValidation {
			authentihash := pe.AuthentihashExt(signatureContent.HashFunction.New())
			if len(authentihash) > 0 {
				signatureValid = bytes.Equal(authentihash[0], signatureContent.HashResult) && certValid
			}
		}
		certInfo.SignatureAlgorithm = signatureContent.Algorithm
		pe.Certificates.Certificates = append(pe.Certificates.Certificates, Certificate{
			Content:          *pkcs,
			SignatureContent: signatureContent,
			SignatureValid:   signatureValid,
			Info:             certInfo,
			Verified:         certValid,
		})

		// Subsequent certificates are an (unsigned) attribute of the PKCS#7
		var newCert asn1.RawValue
		nestedSignatureOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 4, 1}
		err = pkcs.UnmarshalUnsignedAttribute(nestedSignatureOid, &newCert)
		if err != nil {
			var attributeNotFound pkcs7.AttributeNotFoundError
			if errors.As(err, &attributeNotFound) {
				break // No further nested certificates
			}
			return err
		}
		certContent = newCert.FullBytes
	}

	return nil
}

// loadSystemRoots manually downloads all the trusted root certificates
// in Windows by spawning certutil then adding root certs individually
// to the cert pool. Initially, when running in windows, go SystemCertPool()
// used to enumerate all the certificate in the Windows store using
// (CertEnumCertificatesInStore).
Unfortunately, Windows does not ship // with all of its root certificates installed. Instead, it downloads them // on-demand. As a consequence, this behavior leads to a non-deterministic // results. Go team then disabled the loading Windows root certs. func loadSystemRoots() (*x509.CertPool, error) { needSync := true roots := x509.NewCertPool() // Create a temporary dir in the OS temp folder // if it does not exists. dir := filepath.Join(os.TempDir(), "certs") info, err := os.Stat(dir) if os.IsNotExist(err) { if err = os.Mkdir(dir, 0755); err != nil { return roots, err } } else { now := time.Now() modTime := info.ModTime() diff := now.Sub(modTime).Hours() if diff < 24 { needSync = false } } // Use certutil to download all the root certs. if needSync { cmd := exec.Command("certutil", "-syncWithWU", dir) hideWindow(cmd) err := cmd.Run() if err != nil { return roots, err } if cmd.ProcessState.ExitCode() != 0 { return roots, err } } files, err := os.ReadDir(dir) if err != nil { return roots, err } for _, f := range files { if !strings.HasSuffix(f.Name(), ".crt") { continue } certPath := filepath.Join(dir, f.Name()) certData, err := os.ReadFile(certPath) if err != nil { return roots, err } if crt, err := x509.ParseCertificate(certData); err == nil { roots.AddCert(crt) } } return roots, nil } type SpcIndirectDataContent struct { Data SpcAttributeTypeAndOptionalValue MessageDigest DigestInfo } type SpcAttributeTypeAndOptionalValue struct { Type asn1.ObjectIdentifier Value SpcPeImageData `asn1:"optional"` } type SpcPeImageData struct { Flags asn1.BitString File asn1.RawValue } type DigestInfo struct { DigestAlgorithm pkix.AlgorithmIdentifier Digest []byte } // Translation of algorithm identifier to hash algorithm, copied from pkcs7.getHashForOID func parseHashAlgorithm(identifier pkix.AlgorithmIdentifier) (crypto.Hash, x509.SignatureAlgorithm, error) { oid := identifier.Algorithm switch { case oid.Equal(pkcs7.OIDDigestAlgorithmMD5): return crypto.MD5, x509.MD5WithRSA, nil 
case oid.Equal(pkcs7.OIDDigestAlgorithmSHA1), oid.Equal(pkcs7.OIDEncryptionAlgorithmRSA): return crypto.SHA1, x509.SHA1WithRSA, nil case oid.Equal(pkcs7.OIDDigestAlgorithmECDSASHA1): return crypto.SHA1, x509.ECDSAWithSHA1, nil case oid.Equal(pkcs7.OIDDigestAlgorithmDSA), oid.Equal(pkcs7.OIDDigestAlgorithmDSASHA1): return crypto.SHA1, x509.DSAWithSHA1, nil case oid.Equal(pkcs7.OIDDigestAlgorithmSHA256): return crypto.SHA256, x509.SHA256WithRSA, nil case oid.Equal(pkcs7.OIDDigestAlgorithmECDSASHA256): return crypto.SHA256, x509.ECDSAWithSHA256, nil case oid.Equal(pkcs7.OIDDigestAlgorithmSHA384): return crypto.SHA384, x509.SHA384WithRSA, nil case oid.Equal(pkcs7.OIDDigestAlgorithmECDSASHA384): return crypto.SHA384, x509.ECDSAWithSHA384, nil case oid.Equal(pkcs7.OIDDigestAlgorithmSHA512): return crypto.SHA512, x509.SHA512WithRSA, nil case oid.Equal(pkcs7.OIDDigestAlgorithmECDSASHA512): return crypto.SHA512, x509.ECDSAWithSHA512, nil } return 0, 0, pkcs7.ErrUnsupportedAlgorithm } // AuthenticodeContent provides a simplified view on SpcIndirectDataContent, which specifies the ASN.1 encoded values of // the authenticode signature content. 
type AuthenticodeContent struct {
	Algorithm    x509.SignatureAlgorithm
	HashFunction crypto.Hash
	HashResult   []byte
}

// parseAuthenticodeContent decodes the SpcIndirectDataContent held in a
// PKCS#7 content field: it unmarshals the attribute/value pair, then the
// DigestInfo that follows it, and maps the digest algorithm OID onto Go
// crypto identifiers.
func parseAuthenticodeContent(content []byte) (AuthenticodeContent, error) {
	var authenticodeContent SpcIndirectDataContent
	// asn1.Unmarshal returns the unconsumed remainder, which holds the
	// MessageDigest structure parsed next.
	content, err := asn1.Unmarshal(content, &authenticodeContent.Data)
	if err != nil {
		return AuthenticodeContent{}, err
	}
	_, err = asn1.Unmarshal(content, &authenticodeContent.MessageDigest)
	if err != nil {
		return AuthenticodeContent{}, err
	}
	hashFunction, algorithmID, err := parseHashAlgorithm(authenticodeContent.MessageDigest.DigestAlgorithm)
	if err != nil {
		return AuthenticodeContent{}, err
	}
	return AuthenticodeContent{
		Algorithm:    algorithmID,
		HashFunction: hashFunction,
		HashResult:   authenticodeContent.MessageDigest.Digest,
	}, nil
}

// formatPkixName renders a pkix.Name as a comma-separated string in the order
// country, province, locality, organization, common name, keeping only the
// first element of each multi-valued attribute and skipping absent ones.
func formatPkixName(name pkix.Name) string {
	var parts []string
	if len(name.Country) > 0 {
		parts = append(parts, name.Country[0])
	}
	if len(name.Province) > 0 {
		parts = append(parts, name.Province[0])
	}
	if len(name.Locality) > 0 {
		parts = append(parts, name.Locality[0])
	}
	if len(name.Organization) > 0 {
		parts = append(parts, name.Organization[0])
	}
	if name.CommonName != "" {
		parts = append(parts, name.CommonName)
	}
	return strings.Join(parts, ", ")
}

================================================ FILE: security_linux_mac.go ================================================
//go:build !windows
// +build !windows

package pe

import "os/exec"

// hideWindow is a no-op on non-Windows platforms; the Windows build sets
// SysProcAttr.HideWindow on the command instead.
func hideWindow(cmd *exec.Cmd) {
}

================================================ FILE: security_test.go ================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
package pe

import (
	"crypto/x509"
	"fmt"
	"path/filepath"
	"reflect"
	"runtime"
	"testing"
	"time"
)

// TestSecurityEntry groups the expected security-directory parsing results
// for one sample binary: the WIN_CERTIFICATE header, the certificate chain,
// and the error parseSecurityDirectory should return.
type TestSecurityEntry struct {
	Header       WinCertificate
	Certificates []Certificate
	err          error
}

// TestParseSecurityDirectory parses the certificate table of several sample
// binaries and asserts the extracted certificate metadata as well as the
// signature-hash and chain-of-trust verdicts.
func TestParseSecurityDirectory(t *testing.T) {

	tests := []struct {
		in  string
		out TestSecurityEntry
	}{
		{
			getAbsoluteFilePath("test/putty.exe"),
			TestSecurityEntry{
				Header: WinCertificate{
					Length:          0x3D90,
					Revision:        0x200,
					CertificateType: 0x2,
				},
				Certificates: []Certificate{
					{
						Info: CertInfo{
							Issuer:             "GB, Greater Manchester, Salford, COMODO CA Limited, COMODO RSA Code Signing CA",
							Subject:            "GB, Cambridgeshire, Cambridge, Simon Tatham, Simon Tatham",
							NotBefore:          time.Date(2018, time.November, 13, 00, 00, 0, 0, time.UTC),
							NotAfter:           time.Date(2021, time.November, 8, 23, 59, 59, 0, time.UTC),
							SerialNumber:       "7c1118cbbadc95da3752c46e47a27438",
							PublicKeyAlgorithm: x509.RSA,
							SignatureAlgorithm: x509.SHA1WithRSA,
						},
						Verified:       true,
						SignatureValid: true,
					},
					{
						Info: CertInfo{
							Issuer:             "GB, Greater Manchester, Salford, COMODO CA Limited, COMODO RSA Code Signing CA",
							Subject:            "GB, Cambridgeshire, Cambridge, Simon Tatham, Simon Tatham",
							NotBefore:          time.Date(2018, time.November, 13, 00, 00, 0, 0, time.UTC),
							NotAfter:           time.Date(2021, time.November, 8, 23, 59, 59, 0, time.UTC),
							SerialNumber:       "7c1118cbbadc95da3752c46e47a27438",
							PublicKeyAlgorithm: x509.RSA,
							SignatureAlgorithm: x509.SHA256WithRSA,
						},
						Verified:       true,
						SignatureValid: true,
					},
				},
				err: nil,
			},
		},
		{
			// Same binary with tampered content: certificates still verify
			// against the trust store but the authentihash no longer matches.
			getAbsoluteFilePath("test/putty_modified.exe"),
			TestSecurityEntry{
				Header: WinCertificate{
					Length:          0x3D90,
					Revision:        0x200,
					CertificateType: 0x2,
				},
				Certificates: []Certificate{
					{
						Info: CertInfo{
							Issuer:             "GB, Greater Manchester, Salford, COMODO CA Limited, COMODO RSA Code Signing CA",
							Subject:            "GB, Cambridgeshire, Cambridge, Simon Tatham, Simon Tatham",
							NotBefore:          time.Date(2018, time.November, 13, 00, 00, 0, 0, time.UTC),
							NotAfter:           time.Date(2021, time.November, 8, 23, 59, 59, 0, time.UTC),
							SerialNumber:       "7c1118cbbadc95da3752c46e47a27438",
							PublicKeyAlgorithm: x509.RSA,
							SignatureAlgorithm: x509.SHA1WithRSA,
						},
						Verified:       true,
						SignatureValid: false,
					},
					{
						Info: CertInfo{
							Issuer:             "GB, Greater Manchester, Salford, COMODO CA Limited, COMODO RSA Code Signing CA",
							Subject:            "GB, Cambridgeshire, Cambridge, Simon Tatham, Simon Tatham",
							NotBefore:          time.Date(2018, time.November, 13, 00, 00, 0, 0, time.UTC),
							NotAfter:           time.Date(2021, time.November, 8, 23, 59, 59, 0, time.UTC),
							SerialNumber:       "7c1118cbbadc95da3752c46e47a27438",
							PublicKeyAlgorithm: x509.RSA,
							SignatureAlgorithm: x509.SHA256WithRSA,
						},
						Verified:       true,
						SignatureValid: false,
					},
				},
				err: nil,
			},
		},
		{
			getAbsoluteFilePath("test/579fd8a0385482fb4c789561a30b09f25671e86422f40ef5cca2036b28f99648"),
			TestSecurityEntry{
				Header: WinCertificate{
					Length:          0x3488,
					Revision:        0x200,
					CertificateType: 0x2,
				},
				Certificates: []Certificate{
					{
						Info: CertInfo{
							Issuer:             "US, VeriSign, Inc., VeriSign Class 3 Code Signing 2010 CA",
							Subject:            "US, California, Mountain View, Symantec Corporation, Symantec Corporation",
							NotBefore:          time.Date(2016, time.December, 16, 00, 00, 0, 0, time.UTC),
							NotAfter:           time.Date(2017, time.December, 17, 23, 59, 59, 0, time.UTC),
							SerialNumber:       "0ebfea68d677b3e26cab41c33f3e69de",
							PublicKeyAlgorithm: x509.RSA,
							SignatureAlgorithm: x509.SHA1WithRSA,
						},
						Verified:       false,
						SignatureValid: false,
					},
					{
						Info: CertInfo{
							Issuer:             "US, Symantec Corporation, Symantec Class 3 SHA256 Code Signing CA - G2",
							Subject:            "US, California, Mountain View, Symantec Corporation, Symantec Corporation",
							NotBefore:          time.Date(2017, time.March, 15, 00, 00, 0, 0, time.UTC),
							NotAfter:           time.Date(2018, time.April, 13, 23, 59, 59, 0, time.UTC),
							SerialNumber:       "2e6be6bd11a8676e6c57909e9b0d5f57",
							PublicKeyAlgorithm: x509.RSA,
							SignatureAlgorithm: x509.SHA256WithRSA,
						},
						Verified:       false,
						SignatureValid: false,
					},
				},
				err: nil,
			},
		},
		{
			// Malformed certificate header: parsing must fail cleanly.
			getAbsoluteFilePath("test/00121dae38f26a33da2990987db58738c5a5966930126a42f606a3b40e014624"),
			TestSecurityEntry{
				err: ErrSecurityDataDirInvalid,
			},
		},
	}

	for _, tt := range tests {
		t.Run(filepath.Base(tt.in), func(t *testing.T) {
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			var va, size uint32
			if file.Is64 {
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryCertificate]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			} else {
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryCertificate]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}

			err = file.parseSecurityDirectory(va, size)
			if err != tt.out.err {
				t.Fatalf("parseSecurityDirectory(%s) failed, reason: %v", tt.in, err)
			}

			got := file.Certificates
			if tt.out.err == nil {
				if !reflect.DeepEqual(got.Header, tt.out.Header) {
					t.Fatalf("certificate header assertion failed, got %v, want %v",
						got.Header, tt.out.Header)
				}
			}
			if len(got.Certificates) != len(tt.out.Certificates) {
				t.Fatalf("certificate count assertion failed, got %d, want %d",
					len(got.Certificates), len(tt.out.Certificates))
			}
			for i, cert := range got.Certificates {
				expected := tt.out.Certificates[i]
				if !reflect.DeepEqual(cert.Info, expected.Info) {
					t.Fatalf("certificate info %d assertion failed, got %v, want %v",
						i, cert.Info, expected.Info)
				}
				// Verification verdicts depend on the host trust store, so
				// they are only asserted on the platforms the expectations
				// were recorded for.
				if runtime.GOOS == "linux" || runtime.GOOS == "windows" {
					if expected.SignatureValid != cert.SignatureValid {
						t.Fatalf("signature verification %d failed, cert %v, want %v",
							i, cert.SignatureValid, expected.SignatureValid)
					}
					if expected.Verified != cert.Verified {
						t.Fatalf("certificate verification %d failed, cert %v, want %v",
							i, cert.Verified, expected.Verified)
					}
				}
			}
		})
	}
}

// TestAuthentihash asserts the SHA256 authentihash of several sample
// binaries against known-good digests.
func TestAuthentihash(t *testing.T) {

	tests := []struct {
		in  string
		out string
	}{
		{getAbsoluteFilePath("test/putty.exe"),
			"8be7d65593b0fff2e8b29004640261b8a0d4fcc651a14cd0b8b702b7928f8ee0"},
{getAbsoluteFilePath("test/mscorlib.dll"), "a52bd7784efbf206dbda2db058f3928deaf15f6fedf2773affae56023e2f0edb"}, {getAbsoluteFilePath("test/liblzo2-2.dll"), "ae603480b92c7ea3feca164010d2594f9a5282f8b732ecaa0aca29f3225835f6"}, {getAbsoluteFilePath("test/kernel32.dll"), "595e4eb556587a1363ff297df9f354a377963ecac0bed19230992b9601426aae"}, {getAbsoluteFilePath("test/mfc40u.dll"), "5c8acdf9b2c7854c6b8e22e973d2fbae9c68fc22513d24c68c8e8010b1663e67"}, {getAbsoluteFilePath("test/000057fd78f66e64e15f5070364c824a8923b6216bd8bcf6368857fb9674c483"), ""}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { file, err := New(tt.in, &Options{}) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } hash := file.Authentihash() got := fmt.Sprintf("%x", hash) if string(got) != tt.out { t.Errorf("Authentihash(%s) got %v, want %v", tt.in, got, tt.out) } }) } } ================================================ FILE: security_windows.go ================================================ //go:build windows // +build windows package pe import ( "os/exec" "syscall" ) func hideWindow(cmd *exec.Cmd) { cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} } ================================================ FILE: staticcheck.conf ================================================ checks = ["all", "-ST1000", "-U1000", "-SA1019"] ================================================ FILE: symbol.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "bytes" "encoding/binary" "errors" "strings" ) const ( // MaxDefaultSymbolsCount represents the default maximum number of COFF // symbols to parse. Some malware uses a fake huge NumberOfSymbols that // can cause an OOM exception. 
	// Example: 0000e876c5b712b6b7b3ce97f757ddd918fb3dbdc5a3938e850716fbd841309f
	MaxDefaultCOFFSymbolsCount = 0x10000

	// MaxCOFFSymStrLength represents the maximum string length of a COFF
	// symbol to read.
	MaxCOFFSymStrLength = 0x50

	//
	// Type Representation
	//

	// ImageSymTypeNull indicates no type information or unknown base type.
	// Microsoft tools use this setting.
	ImageSymTypeNull = 0
	// ImageSymTypeVoid indicates no valid type; used with void pointers and
	// functions.
	ImageSymTypeVoid = 1
	// ImageSymTypeChar indicates a character (signed byte).
	ImageSymTypeChar = 2
	// ImageSymTypeShort indicates a 2-byte signed integer.
	ImageSymTypeShort = 3
	// ImageSymTypeInt indicates a natural integer type (normally 4 bytes in
	// Windows).
	ImageSymTypeInt = 4
	// ImageSymTypeLong indicates a 4-byte signed integer.
	ImageSymTypeLong = 5
	// ImageSymTypeFloat indicates a 4-byte floating-point number.
	ImageSymTypeFloat = 6
	// ImageSymTypeDouble indicates an 8-byte floating-point number.
	ImageSymTypeDouble = 7
	// ImageSymTypeStruct indicates a structure.
	ImageSymTypeStruct = 8
	// ImageSymTypeUnion indicates a union.
	ImageSymTypeUnion = 9
	// ImageSymTypeEnum indicates an enumerated type.
	ImageSymTypeEnum = 10
	// ImageSymTypeMoe indicates a member of enumeration (a specific value).
	ImageSymTypeMoe = 11
	// ImageSymTypeByte indicates a byte; unsigned 1-byte integer.
	ImageSymTypeByte = 12
	// ImageSymTypeWord indicates a word; unsigned 2-byte integer.
	ImageSymTypeWord = 13
	// ImageSymTypeUint indicates an unsigned integer of natural size
	// (normally, 4 bytes).
	ImageSymTypeUint = 14
	// ImageSymTypeDword indicates an unsigned 4-byte integer.
	ImageSymTypeDword = 15

	//
	// Storage Class
	//

	// ImageSymClassEndOfFunction indicates a special symbol that represents
	// the end of function, for debugging purposes.
	ImageSymClassEndOfFunction = 0xff
	// ImageSymClassNull indicates no assigned storage class.
	ImageSymClassNull = 0
	// ImageSymClassAutomatic indicates automatic (stack) variable. The Value
	// field specifies the stack frame offset.
	ImageSymClassAutomatic = 1
	// ImageSymClassExternal indicates a value that Microsoft tools use for
	// external symbols. The Value field indicates the size if the section
	// number is IMAGE_SYM_UNDEFINED (0). If the section number is not zero,
	// then the Value field specifies the offset within the section.
	ImageSymClassExternal = 2
	// ImageSymClassStatic indicates the offset of the symbol within the
	// section. If the Value field is zero, then the symbol represents a
	// section name.
	ImageSymClassStatic = 3
	// ImageSymClassRegister indicates a register variable. The Value field
	// specifies the register number.
	ImageSymClassRegister = 4
	// ImageSymClassExternalDef indicates a symbol that is defined externally.
	ImageSymClassExternalDef = 5
	// ImageSymClassLabel indicates a code label that is defined within the
	// module. The Value field specifies the offset of the symbol within the
	// section.
	ImageSymClassLabel = 6
	// ImageSymClassUndefinedLabel indicates a reference to a code label that
	// is not defined.
	ImageSymClassUndefinedLabel = 7
	// ImageSymClassMemberOfStruct indicates the structure member. The Value
	// field specifies the n th member.
	ImageSymClassMemberOfStruct = 8
	// ImageSymClassArgument indicates a formal argument (parameter) of a
	// function. The Value field specifies the n th argument.
	ImageSymClassArgument = 9
	// ImageSymClassStructTag indicates the structure tag-name entry.
	ImageSymClassStructTag = 10
	// ImageSymClassMemberOfUnion indicates a union member. The Value field
	// specifies the n th member.
	ImageSymClassMemberOfUnion = 11
	// ImageSymClassUnionTag indicates the union tag-name entry.
	ImageSymClassUnionTag = 12
	// ImageSymClassTypeDefinition indicates a typedef entry.
	ImageSymClassTypeDefinition = 13
	// ImageSymClassUndefinedStatic indicates a static data declaration.
	ImageSymClassUndefinedStatic = 14
	// ImageSymClassEnumTag indicates an enumerated type tagname entry.
	ImageSymClassEnumTag = 15
	// ImageSymClassMemberOfEnum indicates a member of an enumeration. The
	// Value field specifies the n th member.
	ImageSymClassMemberOfEnum = 16
	// ImageSymClassRegisterParam indicates a register parameter.
	ImageSymClassRegisterParam = 17
	// ImageSymClassBitField indicates a bit-field reference. The Value field
	// specifies the n th bit in the bit field.
	ImageSymClassBitField = 18
	// ImageSymClassBlock indicates a .bb (beginning of block) or .eb (end of
	// block) record. The Value field is the relocatable address of the code
	// location.
	ImageSymClassBlock = 100
	// ImageSymClassFunction indicates a value that Microsoft tools use for
	// symbol records that define the extent of a function: begin function (.bf
	// ), end function ( .ef ), and lines in function ( .lf ). For .lf
	// records, the Value field gives the number of source lines in the
	// function. For .ef records, the Value field gives the size of the
	// function code.
	ImageSymClassFunction = 101
	// ImageSymClassEndOfStruct indicates an end-of-structure entry.
	ImageSymClassEndOfStruct = 102
	// ImageSymClassFile indicates a value that Microsoft tools, as well as
	// traditional COFF format, use for the source-file symbol record. The
	// symbol is followed by auxiliary records that name the file.
	ImageSymClassFile = 103
	// ImageSymClassSsection indicates a definition of a section (Microsoft
	// tools use STATIC storage class instead).
	ImageSymClassSsection = 104
	// ImageSymClassWeakExternal indicates a weak external. For more
	// information, see Auxiliary Format 3: Weak Externals.
	ImageSymClassWeakExternal = 24
	// ImageSymClassClrToken indicates a CLR token symbol. The name is an ASCII
	// string that consists of the hexadecimal value of the token. For more
	// information, see CLR Token Definition (Object Only).
	ImageSymClassClrToken = 25

	//
	// Section Number Values.
	//

	// ImageSymUndefined indicates that the symbol record is not yet assigned a
	// section. A value of zero indicates that a reference to an external
	// symbol is defined elsewhere. A value of non-zero is a common symbol with
	// a size that is specified by the value.
	ImageSymUndefined = 0
	// ImageSymAbsolute indicates that the symbol has an absolute
	// (non-relocatable) value and is not an address.
	ImageSymAbsolute = -1
	// ImageSymDebug indicates that the symbol provides general type or
	// debugging information but does not correspond to a section. Microsoft
	// tools use this setting along with .file records (storage class FILE).
	ImageSymDebug = -2
)

// Sentinel errors returned by the COFF symbol/string table parsers.
var (
	errCOFFTableNotPresent = errors.New(
		"PE image does not contains a COFF symbol table")
	errNoCOFFStringInTable = errors.New(
		"PE image got a PointerToSymbolTable but no string in the COFF string table")
	errCOFFSymbolOutOfBounds = errors.New(
		"COFF symbol offset out of bounds")
	errCOFFSymbolsTooHigh = errors.New(
		"COFF symbols count is absurdly high")
)

// COFFSymbol represents an entry in the COFF symbol table, which it is an
// array of records, each 18 bytes long. Each record is either a standard or
// auxiliary symbol-table record. A standard record defines a symbol or name
// and has the following format.
type COFFSymbol struct {
	// The name of the symbol, represented by a union of three structures. An
	// array of 8 bytes is used if the name is not more than 8 bytes long.
	// union {
	//    BYTE ShortName[8];
	//    struct {
	//        DWORD Short;     // if 0, use LongName
	//        DWORD Long;      // offset into string table
	//    } Name;
	//    DWORD LongName[2];    // PBYTE  [2]
	// } N;
	Name [8]byte `json:"name"`

	// The value that is associated with the symbol. The interpretation of this
	// field depends on SectionNumber and StorageClass. A typical meaning is
	// the relocatable address.
	Value uint32 `json:"value"`

	// The signed integer that identifies the section, using a one-based index
	// into the section table. Some values have special meaning.
	// See "Section Number Values."
SectionNumber int16 `json:"section_number"` // A number that represents type. Microsoft tools set this field to // 0x20 (function) or 0x0 (not a function). For more information, // see Type Representation. Type uint16 `json:"type"` // An enumerated value that represents storage class. // For more information, see Storage Class. StorageClass uint8 `json:"storage_class"` // The number of auxiliary symbol table entries that follow this record. NumberOfAuxSymbols uint8 `json:"number_of_aux_symbols"` } // COFF holds properties related to the COFF format. type COFF struct { SymbolTable []COFFSymbol `json:"symbol_table"` StringTable []string `json:"string_table"` StringTableOffset uint32 `json:"string_table_offset"` // Map the symbol offset => symbol name. StringTableM map[uint32]string `json:"-"` } // ParseCOFFSymbolTable parses the COFF symbol table. The symbol table is // inherited from the traditional COFF format. It is distinct from Microsoft // Visual C++ debug information. A file can contain both a COFF symbol table // and Visual C++ debug information, and the two are kept separate. Some // Microsoft tools use the symbol table for limited but important purposes, // such as communicating COMDAT information to the linker. Section names and // file names, as well as code and data symbols, are listed in the symbol table. func (pe *File) ParseCOFFSymbolTable() error { pointerToSymbolTable := pe.NtHeader.FileHeader.PointerToSymbolTable if pointerToSymbolTable == 0 { return errCOFFTableNotPresent } symCount := pe.NtHeader.FileHeader.NumberOfSymbols if symCount == 0 { return nil } if symCount > pe.opts.MaxCOFFSymbolsCount { pe.addAnomaly(AnoCOFFSymbolsCount) return errCOFFSymbolsTooHigh } // The location of the symbol table is indicated in the COFF header. offset := pe.NtHeader.FileHeader.PointerToSymbolTable // The symbol table is an array of records, each 18 bytes long. 
size := uint32(binary.Size(COFFSymbol{})) symbols := make([]COFFSymbol, symCount) // Each record is either a standard or auxiliary symbol-table record. // A standard record defines a symbol or name and has the COFFSymbol STRUCT format. for i := uint32(0); i < symCount; i++ { err := pe.structUnpack(&symbols[i], offset, size) if err != nil { return err } offset += size } pe.COFF.SymbolTable = symbols // Get the COFF string table. pe.COFFStringTable() pe.HasCOFF = true return nil } // COFFStringTable retrieves the list of strings in the COFF string table if // any. func (pe *File) COFFStringTable() error { m := make(map[uint32]string) pointerToSymbolTable := pe.NtHeader.FileHeader.PointerToSymbolTable if pointerToSymbolTable == 0 { return errCOFFTableNotPresent } symCount := pe.NtHeader.FileHeader.NumberOfSymbols if symCount == 0 { return nil } if symCount > pe.opts.MaxCOFFSymbolsCount { pe.addAnomaly(AnoCOFFSymbolsCount) return errCOFFSymbolsTooHigh } // COFF String Table immediately following the COFF symbol table. The // position of this table is found by taking the symbol table address in // the COFF header and adding the number of symbols multiplied by the size // of a symbol. size := uint32(binary.Size(COFFSymbol{})) offset := pointerToSymbolTable + (size * symCount) // At the beginning of the COFF string table are 4 bytes that contain the // total size (in bytes) of the rest of the string table. This size // includes the size field itself, so that the value in this location would // be 4 if no strings were present. pe.COFF.StringTableOffset = offset strTableSize, err := pe.ReadUint32(offset) if err != nil { return err } if strTableSize <= 4 { return errNoCOFFStringInTable } offset += 4 // Following the size are null-terminated strings that are pointed to by // symbols in the COFF symbol table. We create a map to map offset to // string. 
end := offset + strTableSize - 4 for offset < end { len, str := pe.readASCIIStringAtOffset(offset, MaxCOFFSymStrLength) if len == 0 { break } m[offset] = str offset += len + 1 pe.COFF.StringTable = append(pe.COFF.StringTable, str) } pe.COFF.StringTableM = m return nil } // String returns the representation of the symbol name. func (symbol *COFFSymbol) String(pe *File) (string, error) { var short, long uint32 // The ShortName field in a symbol table consists of 8 bytes // that contain the name itself, if it is not more than 8 // bytes long, or the ShortName field gives an offset into // the string table. highDw := bytes.NewBuffer(symbol.Name[4:]) lowDw := bytes.NewBuffer(symbol.Name[:4]) errl := binary.Read(lowDw, binary.LittleEndian, &short) errh := binary.Read(highDw, binary.LittleEndian, &long) if errl != nil || errh != nil { return "", errCOFFSymbolOutOfBounds } // To determine whether the name itself or an offset is given, // test the first 4 bytes for equality to zero. if short != 0 { name := strings.Replace(string(symbol.Name[:]), "\x00", "", -1) return name, nil } // Long name offset to the string table. strOff := pe.COFF.StringTableOffset + long name := pe.COFF.StringTableM[strOff] return name, nil } // SectionNumberName returns the name of the section corresponding to a section // symbol number if any. func (symbol *COFFSymbol) SectionNumberName(pe *File) string { // Normally, the Section Value field in a symbol table entry is a one-based // index into the section table. However, this field is a signed integer // and can take negative values. The following values, less than one, have // special meanings. if symbol.SectionNumber > 0 && symbol.SectionNumber < int16(len(pe.Sections)) { return pe.Sections[symbol.SectionNumber-1].String() } switch symbol.SectionNumber { case ImageSymUndefined: return "Undefined" case ImageSymAbsolute: return "Absolute" case ImageSymDebug: return "Debug" } return "?" 
} // PrettyCOFFTypeRepresentation returns the string representation of the `Type` // field of a COFF table entry. func (pe *File) PrettyCOFFTypeRepresentation(k uint16) string { coffSymTypeMap := map[uint16]string{ ImageSymTypeNull: "Null", ImageSymTypeVoid: "Void", ImageSymTypeChar: "Char", ImageSymTypeShort: "Short", ImageSymTypeInt: "Int", ImageSymTypeLong: "Long", ImageSymTypeFloat: "Float", ImageSymTypeDouble: "Double", ImageSymTypeStruct: "Struct", ImageSymTypeUnion: "Union", ImageSymTypeEnum: "Enum", ImageSymTypeMoe: "Moe", ImageSymTypeByte: "Byte", ImageSymTypeWord: "Word", ImageSymTypeUint: "Uint", ImageSymTypeDword: "Dword", } if value, ok := coffSymTypeMap[k]; ok { return value } return "" } ================================================ FILE: symbol_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import "testing" type TestCOFFSymbol struct { errTooManySymbols error symbolsCount int symbolIdx int symbol COFFSymbol stringTableOffset uint32 symbolName string sectionNumberName string symbolTypeString string } var symbolTests = []struct { in string out TestCOFFSymbol }{ { getAbsoluteFilePath("test/liblzo2-2.dll"), TestCOFFSymbol{ errTooManySymbols: nil, symbolsCount: 50, symbolIdx: 0, symbol: COFFSymbol{ Name: [8]byte{0, 0, 0, 0, 4, 0, 0, 0}, Value: 0x2ac, SectionNumber: 8, Type: 0x0, StorageClass: 0x2, NumberOfAuxSymbols: 0x0, }, stringTableOffset: 0x35184, symbolName: "__imp_abort", sectionNumberName: ".idata", symbolTypeString: "Null", }, }, { getAbsoluteFilePath( "test/0103daa751660333b7ae5f098795df58f07e3031563e042d2eb415bffa71fe7a", ), TestCOFFSymbol{ errTooManySymbols: nil, symbolsCount: 346, symbolIdx: 3, symbol: COFFSymbol{ Name: [8]byte{0, 0, 0, 0, 4, 0, 0, 0}, Value: 0x2ac, SectionNumber: 8, Type: 0x0, StorageClass: 0x2, NumberOfAuxSymbols: 0x0, }, stringTableOffset: 
0x1b054, symbolName: "___mingw_CRTStartup", sectionNumberName: ".text", symbolTypeString: "", }, }, { getAbsoluteFilePath( "test/0000e876c5b712b6b7b3ce97f757ddd918fb3dbdc5a3938e850716fbd841309f", ), TestCOFFSymbol{ errTooManySymbols: errCOFFSymbolsTooHigh, }, }, } func TestParseCOFFSymbolTable(t *testing.T) { for _, tt := range symbolTests { t.Run(tt.in, func(t *testing.T) { ops := Options{Fast: true} file, err := New(tt.in, &ops) if err != nil { t.Fatalf("New(%s) failed, reason: %v", tt.in, err) } err = file.Parse() if err != nil { t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err) } err = file.ParseCOFFSymbolTable() if err != tt.out.errTooManySymbols { t.Errorf( "errTooManySymbols assertion failed, reason: %v", tt.out.errTooManySymbols, ) } // exit early when err is errCOFFSymbolsTooHigh. if err == errCOFFSymbolsTooHigh { return } if len(file.COFF.SymbolTable) != tt.out.symbolsCount { t.Errorf( "symbolsCount assertion failed, want: %d, got: %d", tt.out.symbolsCount, len(file.COFF.SymbolTable), ) } if file.COFF.StringTableOffset != tt.out.stringTableOffset { t.Errorf( "stringTableOffset assertion failed, want: %d, got: %d", tt.out.stringTableOffset, file.COFF.StringTableOffset, ) } if !stringInSlice(tt.out.symbolName, file.COFF.StringTable) { t.Errorf( "symbolName assertion failed, want: %s, got: %v", tt.out.symbolName, file.COFF.StringTable, ) } coffSymbol := file.COFF.SymbolTable[tt.out.symbolIdx] symbolNameStr, err := coffSymbol.String(file) if err != nil { t.Errorf("COFFSymbol.String() failed with: %v", err) } if symbolNameStr != tt.out.symbolName { t.Errorf( "symbol name to string failed, want: %s, got: %s", tt.out.symbolName, symbolNameStr, ) } secNumName := coffSymbol.SectionNumberName(file) if secNumName != tt.out.sectionNumberName { t.Errorf( "SectionNumberName assertion failed, want: %s, got: %s", tt.out.sectionNumberName, secNumName, ) } typeString := file.PrettyCOFFTypeRepresentation(coffSymbol.Type) if typeString != tt.out.symbolTypeString { 
t.Errorf( "PrettyCOFFTypeRepresentation assertion failed, want: %s, got: %s", tt.out.symbolTypeString, typeString, ) } }) } } ================================================ FILE: tls.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. package pe import ( "encoding/binary" ) // TLSDirectoryCharacteristicsType represents the type of a TLS directory // Characteristics. type TLSDirectoryCharacteristicsType uint32 // TLSDirectory represents tls directory information with callback entries. type TLSDirectory struct { // of type *IMAGE_TLS_DIRECTORY32 or *IMAGE_TLS_DIRECTORY64 structure. Struct interface{} `json:"struct"` // of type []uint32 or []uint64. Callbacks interface{} `json:"callbacks"` } // ImageTLSDirectory32 represents the IMAGE_TLS_DIRECTORY32 structure. // It Points to the Thread Local Storage initialization section. type ImageTLSDirectory32 struct { // The starting address of the TLS template. The template is a block of data // that is used to initialize TLS data. StartAddressOfRawData uint32 `json:"start_address_of_raw_data"` // The address of the last byte of the TLS, except for the zero fill. // As with the Raw Data Start VA field, this is a VA, not an RVA. EndAddressOfRawData uint32 `json:"end_address_of_raw_data"` // The location to receive the TLS index, which the loader assigns. This // location is in an ordinary data section, so it can be given a symbolic // name that is accessible to the program. AddressOfIndex uint32 `json:"address_of_index"` // The pointer to an array of TLS callback functions. The array is // null-terminated, so if no callback function is supported, this field // points to 4 bytes set to zero. 
AddressOfCallBacks uint32 `json:"address_of_callbacks"` // The size in bytes of the template, beyond the initialized data delimited // by the Raw Data Start VA and Raw Data End VA fields. The total template // size should be the same as the total size of TLS data in the image file. // The zero fill is the amount of data that comes after the initialized // nonzero data. SizeOfZeroFill uint32 `json:"size_of_zero_fill"` // The four bits [23:20] describe alignment info. Possible values are those // defined as IMAGE_SCN_ALIGN_*, which are also used to describe alignment // of section in object files. The other 28 bits are reserved for future use. Characteristics TLSDirectoryCharacteristicsType `json:"characteristics"` } // ImageTLSDirectory64 represents the IMAGE_TLS_DIRECTORY64 structure. // It Points to the Thread Local Storage initialization section. type ImageTLSDirectory64 struct { // The starting address of the TLS template. The template is a block of data // that is used to initialize TLS data. StartAddressOfRawData uint64 `json:"start_address_of_raw_data"` // The address of the last byte of the TLS, except for the zero fill. As // with the Raw Data Start VA field, this is a VA, not an RVA. EndAddressOfRawData uint64 `json:"end_address_of_raw_data"` // The location to receive the TLS index, which the loader assigns. This // location is in an ordinary data section, so it can be given a symbolic // name that is accessible to the program. AddressOfIndex uint64 `json:"address_of_index"` // The pointer to an array of TLS callback functions. The array is // null-terminated, so if no callback function is supported, this field // points to 4 bytes set to zero. AddressOfCallBacks uint64 `json:"address_of_callbacks"` // The size in bytes of the template, beyond the initialized data delimited // by the Raw Data Start VA and Raw Data End VA fields. The total template // size should be the same as the total size of TLS data in the image file. 
// The zero fill is the amount of data that comes after the initialized // nonzero data. SizeOfZeroFill uint32 `json:"size_of_zero_fill"` // The four bits [23:20] describe alignment info. Possible values are those // defined as IMAGE_SCN_ALIGN_*, which are also used to describe alignment // of section in object files. The other 28 bits are reserved for future use. Characteristics TLSDirectoryCharacteristicsType `json:"characteristics"` } // TLS provides direct PE and COFF support for static thread local storage (TLS). // TLS is a special storage class that Windows supports in which a data object // is not an automatic (stack) variable, yet is local to each individual thread // that runs the code. Thus, each thread can maintain a different value for a // variable declared by using TLS. func (pe *File) parseTLSDirectory(rva, size uint32) error { tls := TLSDirectory{} if pe.Is64 { tlsDir := ImageTLSDirectory64{} tlsSize := uint32(binary.Size(tlsDir)) fileOffset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&tlsDir, fileOffset, tlsSize) if err != nil { return err } tls.Struct = tlsDir if tlsDir.AddressOfCallBacks != 0 { callbacks := make([]uint64, 0) rvaAddressOfCallBacks := uint32(tlsDir.AddressOfCallBacks - pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase) offset := pe.GetOffsetFromRva(rvaAddressOfCallBacks) for { c, err := pe.ReadUint64(offset) if err != nil || c == 0 { break } callbacks = append(callbacks, c) offset += 8 } tls.Callbacks = callbacks } } else { tlsDir := ImageTLSDirectory32{} tlsSize := uint32(binary.Size(tlsDir)) fileOffset := pe.GetOffsetFromRva(rva) err := pe.structUnpack(&tlsDir, fileOffset, tlsSize) if err != nil { return err } tls.Struct = tlsDir // 94a9dc17d47b03f6fb01cb639e25503b37761b452e7c07ec6b6c2280635f1df9 // Callbacks may be empty. 
if tlsDir.AddressOfCallBacks != 0 { callbacks := make([]uint32, 0) rvaAddressOfCallBacks := tlsDir.AddressOfCallBacks - pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase offset := pe.GetOffsetFromRva(rvaAddressOfCallBacks) for { c, err := pe.ReadUint32(offset) if err != nil || c == 0 { break } callbacks = append(callbacks, c) offset += 4 } tls.Callbacks = callbacks } } pe.TLS = tls pe.HasTLS = true return nil } // String returns the string representations of the `Characteristics` field of // TLS directory. func (characteristics TLSDirectoryCharacteristicsType) String() string { m := map[TLSDirectoryCharacteristicsType]string{ ImageSectionAlign1Bytes: "Align 1-Byte", ImageSectionAlign2Bytes: "Align 2-Bytes", ImageSectionAlign4Bytes: "Align 4-Bytes", ImageSectionAlign8Bytes: "Align 8-Bytes", ImageSectionAlign16Bytes: "Align 16-Bytes", ImageSectionAlign32Bytes: "Align 32-Bytes", ImageSectionAlign64Bytes: "Align 64-Bytes", ImageSectionAlign128Bytes: "Align 128-Bytes", ImageSectionAlign256Bytes: "Align 265-Bytes", ImageSectionAlign512Bytes: "Align 512-Bytes", ImageSectionAlign1024Bytes: "Align 1024-Bytes", ImageSectionAlign2048Bytes: "Align 2048-Bytes", ImageSectionAlign4096Bytes: "Align 4096-Bytes", ImageSectionAlign8192Bytes: "Align 8192-Bytes", } v, ok := m[characteristics] if ok { return v } return "?" } ================================================ FILE: tls_test.go ================================================ // Copyright 2018 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"reflect"
	"testing"
)

func TestParseTLSDirectory(t *testing.T) {

	tests := []struct {
		in  string
		out TLSDirectory
	}{
		{
			getAbsoluteFilePath("test/liblzo2-2.dll"),
			TLSDirectory{
				Struct: ImageTLSDirectory64{
					StartAddressOfRawData: 0x6CBBB000,
					EndAddressOfRawData:   0x6CBBB060,
					AddressOfIndex:        0x6CBB75AC,
					AddressOfCallBacks:    0x6CBBA030,
				},
				Callbacks: []uint64{0x6cbae7e0, 0x6cbae7b0},
			},
		},
		{
			getAbsoluteFilePath("test/3a081c7fe475ec68ed155c76d30cfddc4d41f7a09169810682d1c75421e98eaa"),
			TLSDirectory{
				Struct: ImageTLSDirectory32{
					StartAddressOfRawData: 0x004157B8,
					EndAddressOfRawData:   0x004157B9,
					AddressOfIndex:        0x0042F8DC,
					AddressOfCallBacks:    0x0040E3AC,
					Characteristics:       0x00100000,
				},
				Callbacks: []uint32{0x40A5A0},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.in, func(t *testing.T) {
			ops := Options{Fast: true}
			file, err := New(tt.in, &ops)
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			err = file.Parse()
			if err != nil {
				t.Fatalf("Parse(%s) failed, reason: %v", tt.in, err)
			}

			// An if/else reads better than switching on a boolean.
			var va, size uint32
			if file.Is64 {
				oh64 := file.NtHeader.OptionalHeader.(ImageOptionalHeader64)
				dirEntry := oh64.DataDirectory[ImageDirectoryEntryTLS]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			} else {
				oh32 := file.NtHeader.OptionalHeader.(ImageOptionalHeader32)
				dirEntry := oh32.DataDirectory[ImageDirectoryEntryTLS]
				va = dirEntry.VirtualAddress
				size = dirEntry.Size
			}

			err = file.parseTLSDirectory(va, size)
			if err != nil {
				// Fixed copy-pasted message that referenced
				// parseRelocDirectory.
				t.Fatalf("parseTLSDirectory(%s) failed, reason: %v", tt.in, err)
			}

			tls := file.TLS
			if !reflect.DeepEqual(tls, tt.out) {
				// Print the compared value (tls), not just tls.Struct, so
				// got/want line up.
				t.Fatalf("TLS directory assertion failed, got %v, want %v", tls, tt.out)
			}
		})
	}
}

func TestTLSDirectoryCharacteristics(t *testing.T) {

	tests := []struct {
		in  TLSDirectoryCharacteristicsType
		out string
	}{
		{
			TLSDirectoryCharacteristicsType(0x00100000), "Align 1-Byte",
		},
		{
			0xff, "?",
		},
	}

	for _, tt := range tests {
		t.Run(tt.out, func(t *testing.T) {
			TLSDirectoryCharacteristics := tt.in.String()
			if TLSDirectoryCharacteristics != tt.out {
				t.Fatalf("TLS directory characteristics string assertion failed, got %v, want %v",
					TLSDirectoryCharacteristics, tt.out)
			}
		})
	}
}

================================================
FILE: version.go
================================================
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package pe

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	// VersionResourceType identifies the version resource type in the resource directory
	VersionResourceType = 16

	// VsVersionInfoString is the UTF16-encoded string that identifies the VS_VERSION_INFO block
	VsVersionInfoString = "VS_VERSION_INFO"

	// VsFileInfoSignature is the file info signature
	VsFileInfoSignature uint32 = 0xFEEF04BD

	// StringFileInfoString is the UTF16-encoded string that identifies the StringFileInfo block
	StringFileInfoString = "StringFileInfo"

	// VarFileInfoString is the UTF16-encoded string that identifies the VarFileInfoString block
	VarFileInfoString = "VarFileInfo"

	// VsVersionInfoStringLength specifies the length of the VS_VERSION_INFO structure
	VsVersionInfoStringLength uint32 = 6

	// StringFileInfoLength specifies length of the StringFileInfo structure
	StringFileInfoLength uint32 = 6

	// StringTableLength specifies the length of the StringTable structure
	StringTableLength uint32 = 6

	// StringLength specifies the length of the String structure
	StringLength uint32 = 6

	// LangIDLength specifies the length of the language identifier string.
	// It is represented as 8-digit hexadecimal number stored as a Unicode string.
	LangIDLength uint32 = 8*2 + 1
)

// VsVersionInfo represents the organization of data in
// a file-version resource. It is the root structure that
// contains all other file-version information structures.
type VsVersionInfo struct {
	// Length is the length, in bytes, of the VS_VERSIONINFO structure.
	// This length does not include any padding that aligns any
	// subsequent version resource data on a 32-bit boundary.
	Length uint16 `json:"length"`

	// ValueLength is the length, in bytes, of arbitrary data associated
	// with the VS_VERSIONINFO structure.
	// This value is zero if there is no data associated with the
	// current version structure.
	ValueLength uint16 `json:"value_length"`

	// Type describes the kind of data in the version resource.
	// NOTE(review): per the VS_VERSIONINFO layout, wType is 1 for text data
	// and 0 for binary data; the previous comment described the alignment
	// padding instead — confirm against the Windows SDK documentation.
	Type uint16 `json:"type"`
}

// parseVersionInfo reads the VS_VERSIONINFO header located at the resource
// entry's data offset and validates that its key string is "VS_VERSION_INFO".
// It returns the decoded header, or an error if reading/decoding fails or the
// key string does not match.
func (pe *File) parseVersionInfo(e ResourceDirectoryEntry) (*VsVersionInfo, error) {
	// Translate the resource's RVA to a raw file offset.
	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData)
	b, err := pe.ReadBytesAtOffset(offset, e.Data.Struct.Size)
	if err != nil {
		return nil, err
	}
	var v VsVersionInfo
	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &v); err != nil {
		return nil, err
	}
	// The UTF-16 key string follows the 6-byte fixed header; decode and
	// compare it against the expected "VS_VERSION_INFO" marker.
	b, err = pe.ReadBytesAtOffset(offset+VsVersionInfoStringLength, uint32(v.ValueLength))
	if err != nil {
		return nil, err
	}
	vsVersionString, err := DecodeUTF16String(b)
	if err != nil {
		return nil, err
	}
	if vsVersionString != VsVersionInfoString {
		return nil, fmt.Errorf("invalid VS_VERSION_INFO block. %s", vsVersionString)
	}
	return &v, nil
}

// VsFixedFileInfo contains version information for a file.
// This information is language and code page independent.
type VsFixedFileInfo struct {
	// Signature contains the value 0xFEEF04BD. This is used
	// with the `key` member of the VS_VERSIONINFO structure
	// when searching a file for the VS_FIXEDFILEINFO structure.
	Signature uint32 `json:"signature"`

	// StructVer is the binary version number of this structure.
	// The high-order word of this member contains the major version
	// number, and the low-order word contains the minor version number.
	StructVer uint32 `json:"struct_ver"`

	// FileVersionMS denotes the most significant 32 bits of the file's
	// binary version number.
	FileVersionMS uint32 `json:"file_version_ms"`

	// FileVersionLS denotes the least significant 32 bits of the file's
	// binary version number.
	FileVersionLS uint32 `json:"file_version_ls"`

	// ProductVersionMS represents the most significant 32 bits of the
	// binary version number of the product with which this file was distributed.
	ProductVersionMS uint32 `json:"product_version_ms"`

	// ProductVersionLS represents the least significant 32 bits of the
	// binary version number of the product with which this file was
	// distributed. (The previous comment incorrectly said "most
	// significant", mirroring ProductVersionMS.)
	ProductVersionLS uint32 `json:"product_version_ls"`

	// FileFlagMask contains a bitmask that specifies the valid bits in FileFlags.
	// A bit is valid only if it was defined when the file was created.
	FileFlagMask uint32 `json:"file_flag_mask"`

	// FileFlags contains a bitmask that specifies the Boolean attributes of the file.
	// For example, the file contains debugging information or is compiled with debugging
	// features enabled if FileFlags is equal to 0x00000001L (VS_FF_DEBUG).
	FileFlags uint32 `json:"file_flags"`

	// FileOS represents the operating system for which this file was designed.
	FileOS uint32 `json:"file_os"`

	// FileType describes the general type of file.
	FileType uint32 `json:"file_type"`

	// FileSubtype specifies the function of the file. The possible values depend on the value of FileType.
	FileSubtype uint32 `json:"file_subtype"`

	// FileDateMS are the most significant 32 bits of the file's 64-bit binary creation date and time stamp.
	FileDateMS uint32 `json:"file_date_ms"`

	// FileDateLS are the least significant 32 bits of the file's 64-bit binary creation date and time stamp.
	FileDateLS uint32 `json:"file_date_ls"`
}

// Size returns the size of this structure in bytes.
func (f *VsFixedFileInfo) Size() uint32 { return uint32(binary.Size(f)) } func (f *VsFixedFileInfo) GetStringFileInfoOffset(e ResourceDirectoryEntry) uint32 { return alignDword(VsVersionInfoStringLength+uint32(2*len(VsVersionInfoString)+1)+f.Size(), e.Data.Struct.OffsetToData) } func (f *VsFixedFileInfo) GetOffset(e ResourceDirectoryEntry, pe *File) uint32 { offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + VsVersionInfoStringLength offset += uint32(2*len(VsVersionInfoString)) + 1 return alignDword(offset, e.Data.Struct.OffsetToData) } func (pe *File) parseFixedFileInfo(e ResourceDirectoryEntry) (*VsFixedFileInfo, error) { var f VsFixedFileInfo offset := f.GetOffset(e, pe) b, err := pe.ReadBytesAtOffset(offset, f.Size()) if err != nil { return nil, err } if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &f); err != nil { return nil, err } if f.Signature != VsFileInfoSignature { return nil, fmt.Errorf("invalid file info signature %d", f.Signature) } return &f, nil } // StringFileInfo represents the organization of data in a // file-version resource. It contains version information // that can be displayed for a particular language and code page. 
type StringFileInfo struct { Length uint16 ValueLength uint16 Type uint16 } func (s *StringFileInfo) GetStringTableOffset(offset uint32) uint32 { return offset + StringFileInfoLength + uint32(2*len(StringFileInfoString)) + 1 } func (s *StringFileInfo) GetOffset(rva uint32, e ResourceDirectoryEntry, pe *File) uint32 { offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva return alignDword(offset, e.Data.Struct.OffsetToData) } func (pe *File) parseStringFileInfo(rva uint32, e ResourceDirectoryEntry) (*StringFileInfo, string, error) { var s StringFileInfo offset := s.GetOffset(rva, e, pe) b, err := pe.ReadBytesAtOffset(offset, StringFileInfoLength) if err != nil { return nil, "", err } if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &s); err != nil { return nil, "", err } b, err = pe.ReadBytesAtOffset(offset+StringFileInfoLength, uint32(len(StringFileInfoString)*2)+1) if err != nil { return nil, "", err } str, err := DecodeUTF16String(b) return &s, str, err } // StringTable represents the organization of data in a // file-version resource. 
It contains language and code // page formatting information for the version strings type StringTable struct { Length uint16 ValueLength uint16 Type uint16 } func (s *StringTable) GetStringOffset(offset uint32, e ResourceDirectoryEntry) uint32 { return alignDword(offset+StringTableLength+LangIDLength, e.Data.Struct.OffsetToData) } func (s *StringTable) GetOffset(rva uint32, e ResourceDirectoryEntry, pe *File) uint32 { offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva return alignDword(offset, e.Data.Struct.OffsetToData) } func (pe *File) parseStringTable(rva uint32, e ResourceDirectoryEntry) (*StringTable, error) { var s StringTable offset := s.GetOffset(rva, e, pe) b, err := pe.ReadBytesAtOffset(offset, StringTableLength) if err != nil { return nil, err } if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &s); err != nil { return nil, err } // Read the 8-digit hexadecimal number stored as a Unicode string. // The four most significant digits represent the language identifier. // The four least significant digits represent the code page for which // the data is formatted. b, err = pe.ReadBytesAtOffset(offset+StringTableLength, (8*2)+1) if err != nil { return nil, err } langID, err := DecodeUTF16String(b) if err != nil { return nil, err } if len(langID) != int(LangIDLength/2) { return nil, fmt.Errorf("invalid language identifier length. Expected: %d, Got: %d", LangIDLength/2, len(langID)) } return &s, nil } // String Represents the organization of data in a // file-version resource. It contains a string that // describes a specific aspect of a file, for example, // a file's version, its copyright notices, or its trademarks. 
type String struct {
	Length      uint16
	ValueLength uint16
	Type        uint16
}

// GetOffset translates the RVA-relative position rva of this String entry
// into a 32-bit aligned file offset.
func (s *String) GetOffset(rva uint32, e ResourceDirectoryEntry, pe *File) uint32 {
	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva
	return alignDword(offset, e.Data.Struct.OffsetToData)
}

// variant of GetOffset which also returns the number of bytes which were added
// to achieve 32-bit alignment. The padding value needs to be added to the
// string length to figure out the offset of the next string
func (s *String) getOffsetAndPadding(rva uint32, e ResourceDirectoryEntry, pe *File) (uint32, uint16) {
	unalignedOffset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva
	alignedOffset := alignDword(unalignedOffset, e.Data.Struct.OffsetToData)
	return alignedOffset, uint16(alignedOffset - unalignedOffset)
}

// parseString reads one String entry at rva and returns its key (e.g.
// "CompanyName"), its value, and the total length (including alignment
// padding) the caller must advance by to reach the next String.
func (pe *File) parseString(rva uint32, e ResourceDirectoryEntry) (string, string, uint16, error) {
	var s String
	offset, padding := s.getOffsetAndPadding(rva, e, pe)
	b, err := pe.ReadBytesAtOffset(offset, StringLength)
	if err != nil {
		return "", "", 0, err
	}
	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &s); err != nil {
		return "", "", 0, err
	}
	// The key is NUL-terminated; read a bounded window and let the UTF-16
	// decoder stop at the terminator.
	const maxKeySize = 100
	b, err = pe.ReadBytesAtOffset(offset+StringLength, maxKeySize)
	if err != nil {
		return "", "", 0, err
	}
	key, err := DecodeUTF16String(b)
	if err != nil {
		return "", "", 0, err
	}
	// Value starts after the key (plus its NUL), aligned to a 32-bit boundary.
	valueOffset := alignDword(uint32(2*(len(key)+1))+offset+StringLength, e.Data.Struct.OffsetToData)
	// NOTE(review): per the resource spec, ValueLength for String entries is a
	// count of 16-bit words; 2*(ValueLength+1) bytes therefore over-reads by
	// one word — presumably to include the terminator. Confirm against the
	// VERSIONINFO documentation.
	b, err = pe.ReadBytesAtOffset(valueOffset, uint32(2*(s.ValueLength+1)))
	if err != nil {
		return "", "", 0, err
	}
	value, err := DecodeUTF16String(b)
	if err != nil {
		return "", "", 0, err
	}
	// The caller of this function uses the string length as an offset to find
	// the next string in the file. We need add the alignment padding here
	// since the caller is unaware of the byte alignment, and will add the
	// string length to the unaligned offset to get the address of the next
	// string.
	totalLength := s.Length + padding
	return key, value, totalLength, nil
}

// ParseVersionResources parses file version strings from the version resource
// directory. This directory contains several structures starting with VS_VERSION_INFO
// with references to children StringFileInfo structures. In addition, StringFileInfo
// contains the StringTable structure with String entries describing the name and value
// of each file version strings.
func (pe *File) ParseVersionResources() (map[string]string, error) {
	vers := make(map[string]string)
	if pe.opts.OmitResourceDirectory {
		return vers, nil
	}
	for _, e := range pe.Resources.Entries {
		if e.ID != VersionResourceType {
			continue
		}
		// Skip malformed version resources with no directory entries.
		if len(e.Directory.Entries) == 0 {
			continue
		}
		directory := e.Directory.Entries[0].Directory
		for _, e := range directory.Entries {
			m, err := pe.parseVersionEntry(e, vers)
			if err != nil {
				return m, err
			}
		}
	}
	return vers, nil
}

// parseVersionEntry walks one version resource entry: VS_VERSION_INFO header,
// VS_FIXEDFILEINFO, then the StringFileInfo/VarFileInfo children, collecting
// every String key/value pair into vers (mutated in place).
func (pe *File) parseVersionEntry(e ResourceDirectoryEntry, vers map[string]string) (map[string]string, error) {
	ver, err := pe.parseVersionInfo(e)
	if err != nil {
		return vers, err
	}
	ff, err := pe.parseFixedFileInfo(e)
	if err != nil {
		return vers, err
	}
	offset := ff.GetStringFileInfoOffset(e)
	for {
		// Short-circuit: f is only dereferenced when err is nil.
		f, n, err := pe.parseStringFileInfo(offset, e)
		if err != nil || f.Length == 0 {
			break
		}
		switch n {
		case StringFileInfoString:
			tableOffset := f.GetStringTableOffset(offset)
			for {
				table, err := pe.parseStringTable(tableOffset, e)
				if err != nil {
					break
				}
				// Walk the String entries packed inside this table.
				stringOffset := table.GetStringOffset(tableOffset, e)
				for stringOffset < tableOffset+uint32(table.Length) {
					k, v, l, err := pe.parseString(stringOffset, e)
					if err != nil {
						break
					}
					vers[k] = v
					if l == 0 {
						// Zero-length entry: bail out of this table to avoid
						// looping forever on the same offset.
						stringOffset = tableOffset + uint32(table.Length)
					} else {
						stringOffset = stringOffset + uint32(l)
					}
				}
				// handle potential infinite loops
				// NOTE(review): this condition is true for any non-zero
				// table.Length, so the loop always exits after the first
				// StringTable; tableOffset is never advanced and the check
				// below is unreachable. Looks like an inverted overflow
				// guard — confirm whether multiple StringTables (multiple
				// languages) should be parsed here.
				if uint32(table.Length)+tableOffset > tableOffset {
					break
				}
				if tableOffset > uint32(f.Length) {
					break
				}
			}
		case VarFileInfoString:
			// VarFileInfo (translation) blocks are intentionally skipped.
		default:
		}
		offset += uint32(f.Length) // StringFileInfo/VarFileinfo structs consumed?
StringFileInfo/VarFileinfo structs consumed? if offset >= uint32(ver.Length) { break } } return nil, nil } // ParseVersionResourcesForEntries parses file version strings from the version resource // directory. This directory contains several structures starting with VS_VERSION_INFO // with references to children StringFileInfo structures. In addition, StringFileInfo // contains the StringTable structure with String entries describing the name and value // of each file version strings. func (pe *File) ParseVersionResourcesForEntries() ([]map[string]string, error) { var allVersions []map[string]string if pe.opts.OmitResourceDirectory { return allVersions, nil } for _, e := range pe.Resources.Entries { if e.ID != VersionResourceType { continue } directory := e.Directory.Entries[0].Directory for _, e := range directory.Entries { vers := make(map[string]string) allVersions = append(allVersions, vers) _, err := pe.parseVersionEntry(e, vers) if err != nil { return allVersions, err } } } return allVersions, nil } ================================================ FILE: version_test.go ================================================ // Copyright 2021 Saferwall. All rights reserved. // Use of this source code is governed by Apache v2 license // license that can be found in the LICENSE file. 
package pe

import (
	"testing"
)

// peVersionResourceTests maps sample binaries to the version-resource
// key/value pairs they are expected to contain. `out` is the expected error
// from File.Parse (nil for all current samples).
var peVersionResourceTests = []struct {
	in               string
	out              error
	versionResources map[string]string
}{
	{
		getAbsoluteFilePath("test/putty.exe"),
		nil,
		map[string]string{"CompanyName": "Simon Tatham", "FileDescription": "SSH, Telnet and Rlogin client", "FileVersion": "Release 0.73 (with embedded help)", "InternalName": "PuTTY", "OriginalFilename": "PuTTY", "ProductName": "PuTTY suite", "ProductVersion": "Release 0.73"},
	},
	{
		getAbsoluteFilePath("test/brave.exe"),
		nil,
		map[string]string{"CompanyName": "Brave Software, Inc.", "FileDescription": "Brave Browser", "FileVersion": "80.1.7.92", "InternalName": "chrome_exe"},
	},
	{
		getAbsoluteFilePath("test/impbyord.exe"),
		nil,
		map[string]string{},
	},
	{
		getAbsoluteFilePath("test/WdBoot.sys"),
		nil,
		map[string]string{"CompanyName": "Microsoft Corporation", "FileDescription": "Microsoft antimalware boot driver", "FileVersion": "4.18.1906.3 (GitEnlistment(winpbld).190621-1227)", "InternalName": "WdBoot"},
	},
	{
		getAbsoluteFilePath("test/shimeng.dll"),
		nil,
		map[string]string{"CompanyName": "Microsoft Corporation", "FileDescription": "Shim Engine DLL", "FileVersion": "10.0.17763.1 (WinBuild.160101.0800)", "OriginalFilename": "Shim Engine DLL (IAT)", "LegalCopyright": "© Microsoft Corporation. All rights reserved.", "InternalName": "Shim Engine DLL (IAT)", "ProductName": "Microsoft® Windows® Operating System", "ProductVersion": "10.0.17763.1"},
	},
	{
		getAbsoluteFilePath("test/pwsh.exe"),
		nil,
		map[string]string{"Assembly Version": "7.3.4.500", "Comments": "PowerShell on Windows top-level project", "CompanyName": "Microsoft Corporation", "FileDescription": "pwsh", "FileVersion": "7.3.4.500", "InternalName": "pwsh.dll", "LegalCopyright": "(c) Microsoft Corporation.", "OriginalFilename": "pwsh.dll", "ProductName": "PowerShell", "ProductVersion": "7.3.4 SHA: b59f05d5a1b2fceca231f75c53c203a02edf6203"},
	},
	{
		getAbsoluteFilePath("test/YourPhone.Exp.WinRT.dll"),
		nil,
		map[string]string{"CompanyName": "Microsoft Corporation", "FileDescription": "", "FileVersion": "1.24052.124.0", "OriginalFilename": "YourPhone.Exp.WinRT.dll", "LegalCopyright": "© Microsoft Corporation. All rights reserved.", "InternalName": "YourPhone.Exp.WinRT", "ProductName": "Microsoft Phone Link", "ProductVersion": "1.24052.124.0"},
	},
}

// TestParseVersionResources verifies that ParseVersionResources extracts the
// expected key/value pairs from each sample's version resource directory.
func TestParseVersionResources(t *testing.T) {
	for _, tt := range peVersionResourceTests {
		t.Run(tt.in, func(t *testing.T) {
			file, err := New(tt.in, &Options{})
			if err != nil {
				t.Fatalf("New(%s) failed, reason: %v", tt.in, err)
			}
			got := file.Parse()
			if got != nil {
				t.Errorf("Parse(%s) got %v, want %v", tt.in, got, tt.out)
			}
			vers, err := file.ParseVersionResources()
			if err != nil {
				// Fixed typo in the failure message ("ParseVersionResurces").
				t.Fatalf("ParseVersionResources(%s) failed, reason: %v", tt.in, err)
			}
			for k, v := range tt.versionResources {
				val, ok := vers[k]
				if !ok {
					t.Errorf("%s: should have %s version resource", tt.in, k)
				}
				if val != v {
					t.Errorf("%s: expected: %s version resource got: %s. Available resources: %v", tt.in, v, val, vers)
				}
			}
		})
	}
}