Repository: brexhq/substation Branch: main Commit: 28f1b349b4da Files: 64 Total size: 178.6 KB Directory structure: gitextract_rrrj7zse/ ├── .devcontainer/ │ ├── Dockerfile │ ├── devcontainer.json │ └── post_start.sh ├── .dockerignore ├── .git/ │ ├── HEAD │ ├── config │ ├── description │ ├── hooks/ │ │ ├── applypatch-msg.sample │ │ ├── commit-msg.sample │ │ ├── fsmonitor-watchman.sample │ │ ├── post-update.sample │ │ ├── pre-applypatch.sample │ │ ├── pre-commit.sample │ │ ├── pre-merge-commit.sample │ │ ├── pre-push.sample │ │ ├── pre-rebase.sample │ │ ├── pre-receive.sample │ │ ├── prepare-commit-msg.sample │ │ ├── push-to-checkout.sample │ │ ├── sendemail-validate.sample │ │ └── update.sample │ ├── index │ ├── info/ │ │ └── exclude │ ├── logs/ │ │ ├── HEAD │ │ └── refs/ │ │ ├── heads/ │ │ │ └── main │ │ └── remotes/ │ │ └── origin/ │ │ └── HEAD │ ├── objects/ │ │ └── pack/ │ │ ├── pack-1c76da16733430db3ab67c8402e095f5f4fcf914.idx │ │ ├── pack-1c76da16733430db3ab67c8402e095f5f4fcf914.pack │ │ ├── pack-1c76da16733430db3ab67c8402e095f5f4fcf914.promisor │ │ ├── pack-1c76da16733430db3ab67c8402e095f5f4fcf914.rev │ │ ├── pack-e1e73a8714333d259732152371bb7e014c5153a6.idx │ │ ├── pack-e1e73a8714333d259732152371bb7e014c5153a6.pack │ │ ├── pack-e1e73a8714333d259732152371bb7e014c5153a6.promisor │ │ └── pack-e1e73a8714333d259732152371bb7e014c5153a6.rev │ ├── packed-refs │ ├── refs/ │ │ ├── heads/ │ │ │ └── main │ │ └── remotes/ │ │ └── origin/ │ │ └── HEAD │ └── shallow ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── adopters.yaml │ │ ├── bug_report.md │ │ └── feature_request.md │ ├── pull_request_template.md │ └── workflows/ │ ├── code.yml │ ├── code_jsonnet.sh │ ├── conventional_commits.yml │ ├── release_please.yml │ └── scorecard.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── .vscode/ │ └── settings.json ├── ADOPTERS.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── MIGRATION.md ├── README.md ├── SECURITY.md ├── VERSIONING.md ├── go.mod ├── go.sum ├── 
substation.go ├── substation.libsonnet ├── substation_test.go └── substation_test.jsonnet ================================================ FILE CONTENTS ================================================ ================================================ FILE: .devcontainer/Dockerfile ================================================ FROM --platform=linux/arm64 mcr.microsoft.com/vscode/devcontainers/go:dev-1.24 RUN apt-get update -y && \ # Docker curl -fsSL https://get.docker.com | sh && \ # https://developer.hashicorp.com/terraform/downloads wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg && \ echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/hashicorp.list && \ apt update -y && apt install -y terraform && \ # go-jsonnet go install github.com/google/go-jsonnet/cmd/jsonnet@latest && \ go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest && \ go install github.com/google/go-jsonnet/cmd/jsonnet-lint@latest && \ # AWS CLI apt-get install -y awscli && \ # python apt-get install -y python3 python3-boto3 black && \ # GCP CLI curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg && \ echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ apt-get update -y && apt-get install -y google-cloud-cli ================================================ FILE: .devcontainer/devcontainer.json ================================================ { "name": "Go", "build": { "dockerfile": "Dockerfile" }, "remoteUser": "root", "runArgs": [ "--memory=2g", "--cpus=2" ], "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ], "postStartCommand": "sh .devcontainer/post_start.sh", "customizations": { 
"vscode": { "extensions": [ "ms-vscode.go", "hashicorp.terraform", "grafana.vscode-jsonnet" ] } } } ================================================ FILE: .devcontainer/post_start.sh ================================================ git config --global --add safe.directory /workspaces/substation rm -f ~/.docker/config.json ================================================ FILE: .dockerignore ================================================ .devcontainer .git .github .vscode build/container build/scripts !build/scripts/aws/lambda/extension.zip build/terraform **/examples .dockerignore .gitignore .golangci.yml CODEOWNERS **/*.md ================================================ FILE: .git/HEAD ================================================ ref: refs/heads/main ================================================ FILE: .git/config ================================================ [core] repositoryformatversion = 1 filemode = true bare = false logallrefupdates = true [remote "origin"] url = https://github.com/brexhq/substation tagOpt = --no-tags fetch = +refs/heads/main:refs/remotes/origin/main promisor = true partialclonefilter = blob:limit=1048576 [branch "main"] remote = origin merge = refs/heads/main ================================================ FILE: .git/description ================================================ Unnamed repository; edit this file 'description' to name the repository. ================================================ FILE: .git/hooks/applypatch-msg.sample ================================================ #!/bin/sh # # An example hook script to check the commit log message taken by # applypatch from an e-mail message. # # The hook should exit with non-zero status after issuing an # appropriate message if it wants to stop the commit. The hook is # allowed to edit the commit message file. # # To enable this hook, rename this file to "applypatch-msg". . 
git-sh-setup commitmsg="$(git rev-parse --git-path hooks/commit-msg)" test -x "$commitmsg" && exec "$commitmsg" ${1+"$@"} : ================================================ FILE: .git/hooks/commit-msg.sample ================================================ #!/bin/sh # # An example hook script to check the commit log message. # Called by "git commit" with one argument, the name of the file # that has the commit message. The hook should exit with non-zero # status after issuing an appropriate message if it wants to stop the # commit. The hook is allowed to edit the commit message file. # # To enable this hook, rename this file to "commit-msg". # Uncomment the below to add a Signed-off-by line to the message. # Doing this in a hook is a bad idea in general, but the prepare-commit-msg # hook is more suited to it. # # SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') # grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" # This example catches duplicate Signed-off-by lines. test "" = "$(grep '^Signed-off-by: ' "$1" | sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { echo >&2 Duplicate Signed-off-by lines. exit 1 } ================================================ FILE: .git/hooks/fsmonitor-watchman.sample ================================================ #!/usr/bin/perl use strict; use warnings; use IPC::Open2; # An example hook script to integrate Watchman # (https://facebook.github.io/watchman/) with git to speed up detecting # new and modified files. # # The hook is passed a version (currently 2) and last update token # formatted as a string and outputs to stdout a new update token and # all files that have been modified since the update token. Paths must # be relative to the root of the working tree and separated by a single NUL. 
# # To enable this hook, rename this file to "query-watchman" and set # 'git config core.fsmonitor .git/hooks/query-watchman' # my ($version, $last_update_token) = @ARGV; # Uncomment for debugging # print STDERR "$0 $version $last_update_token\n"; # Check the hook interface version if ($version ne 2) { die "Unsupported query-fsmonitor hook version '$version'.\n" . "Falling back to scanning...\n"; } my $git_work_tree = get_working_dir(); my $retry = 1; my $json_pkg; eval { require JSON::XS; $json_pkg = "JSON::XS"; 1; } or do { require JSON::PP; $json_pkg = "JSON::PP"; }; launch_watchman(); sub launch_watchman { my $o = watchman_query(); if (is_work_tree_watched($o)) { output_result($o->{clock}, @{$o->{files}}); } } sub output_result { my ($clockid, @files) = @_; # Uncomment for debugging watchman output # open (my $fh, ">", ".git/watchman-output.out"); # binmode $fh, ":utf8"; # print $fh "$clockid\n@files\n"; # close $fh; binmode STDOUT, ":utf8"; print $clockid; print "\0"; local $, = "\0"; print @files; } sub watchman_clock { my $response = qx/watchman clock "$git_work_tree"/; die "Failed to get clock id on '$git_work_tree'.\n" . "Falling back to scanning...\n" if $? != 0; return $json_pkg->new->utf8->decode($response); } sub watchman_query { my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty') or die "open2() failed: $!\n" . "Falling back to scanning...\n"; # In the query expression below we're asking for names of files that # changed since $last_update_token but not from the .git folder. # # To accomplish this, we're using the "since" generator to use the # recency index to select candidate nodes and "fields" to limit the # output to file names only. Then we're using the "expression" term to # further constrain the results. 
my $last_update_line = ""; if (substr($last_update_token, 0, 1) eq "c") { $last_update_token = "\"$last_update_token\""; $last_update_line = qq[\n"since": $last_update_token,]; } my $query = <<" END"; ["query", "$git_work_tree", {$last_update_line "fields": ["name"], "expression": ["not", ["dirname", ".git"]] }] END # Uncomment for debugging the watchman query # open (my $fh, ">", ".git/watchman-query.json"); # print $fh $query; # close $fh; print CHLD_IN $query; close CHLD_IN; my $response = do {local $/; }; # Uncomment for debugging the watch response # open ($fh, ">", ".git/watchman-response.json"); # print $fh $response; # close $fh; die "Watchman: command returned no output.\n" . "Falling back to scanning...\n" if $response eq ""; die "Watchman: command returned invalid output: $response\n" . "Falling back to scanning...\n" unless $response =~ /^\{/; return $json_pkg->new->utf8->decode($response); } sub is_work_tree_watched { my ($output) = @_; my $error = $output->{error}; if ($retry > 0 and $error and $error =~ m/unable to resolve root .* directory (.*) is not watched/) { $retry--; my $response = qx/watchman watch "$git_work_tree"/; die "Failed to make watchman watch '$git_work_tree'.\n" . "Falling back to scanning...\n" if $? != 0; $output = $json_pkg->new->utf8->decode($response); $error = $output->{error}; die "Watchman: $error.\n" . "Falling back to scanning...\n" if $error; # Uncomment for debugging watchman output # open (my $fh, ">", ".git/watchman-output.out"); # close $fh; # Watchman will always return all files on the first query so # return the fast "everything is dirty" flag to git and do the # Watchman query just to get it over with now so we won't pay # the cost in git to look up each individual file. my $o = watchman_clock(); $error = $output->{error}; die "Watchman: $error.\n" . 
"Falling back to scanning...\n" if $error; output_result($o->{clock}, ("/")); $last_update_token = $o->{clock}; eval { launch_watchman() }; return 0; } die "Watchman: $error.\n" . "Falling back to scanning...\n" if $error; return 1; } sub get_working_dir { my $working_dir; if ($^O =~ 'msys' || $^O =~ 'cygwin') { $working_dir = Win32::GetCwd(); $working_dir =~ tr/\\/\//; } else { require Cwd; $working_dir = Cwd::cwd(); } return $working_dir; } ================================================ FILE: .git/hooks/post-update.sample ================================================ #!/bin/sh # # An example hook script to prepare a packed repository for use over # dumb transports. # # To enable this hook, rename this file to "post-update". exec git update-server-info ================================================ FILE: .git/hooks/pre-applypatch.sample ================================================ #!/bin/sh # # An example hook script to verify what is about to be committed # by applypatch from an e-mail message. # # The hook should exit with non-zero status after issuing an # appropriate message if it wants to stop the commit. # # To enable this hook, rename this file to "pre-applypatch". . git-sh-setup precommit="$(git rev-parse --git-path hooks/pre-commit)" test -x "$precommit" && exec "$precommit" ${1+"$@"} : ================================================ FILE: .git/hooks/pre-commit.sample ================================================ #!/bin/sh # # An example hook script to verify what is about to be committed. # Called by "git commit" with no arguments. The hook should # exit with non-zero status after issuing an appropriate message if # it wants to stop the commit. # # To enable this hook, rename this file to "pre-commit". if git rev-parse --verify HEAD >/dev/null 2>&1 then against=HEAD else # Initial commit: diff against an empty tree object against=$(git hash-object -t tree /dev/null) fi # If you want to allow non-ASCII filenames set this variable to true. 
allownonascii=$(git config --type=bool hooks.allownonascii) # Redirect output to stderr. exec 1>&2 # Cross platform projects tend to avoid non-ASCII filenames; prevent # them from being added to the repository. We exploit the fact that the # printable range starts at the space character and ends with tilde. if [ "$allownonascii" != "true" ] && # Note that the use of brackets around a tr range is ok here, (it's # even required, for portability to Solaris 10's /usr/bin/tr), since # the square bracket bytes happen to fall in the designated range. test $(git diff-index --cached --name-only --diff-filter=A -z $against | LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 then cat <<\EOF Error: Attempt to add a non-ASCII file name. This can cause problems if you want to work with people on other platforms. To be portable it is advisable to rename the file. If you know what you are doing you can disable this check using: git config hooks.allownonascii true EOF exit 1 fi # If there are whitespace errors, print the offending file names and fail. exec git diff-index --check --cached $against -- ================================================ FILE: .git/hooks/pre-merge-commit.sample ================================================ #!/bin/sh # # An example hook script to verify what is about to be committed. # Called by "git merge" with no arguments. The hook should # exit with non-zero status after issuing an appropriate message to # stderr if it wants to stop the merge commit. # # To enable this hook, rename this file to "pre-merge-commit". . git-sh-setup test -x "$GIT_DIR/hooks/pre-commit" && exec "$GIT_DIR/hooks/pre-commit" : ================================================ FILE: .git/hooks/pre-push.sample ================================================ #!/bin/sh # An example hook script to verify what is about to be pushed. Called by "git # push" after it has checked the remote status, but before anything has been # pushed. 
If this script exits with a non-zero status nothing will be pushed. # # This hook is called with the following parameters: # # $1 -- Name of the remote to which the push is being done # $2 -- URL to which the push is being done # # If pushing without using a named remote those arguments will be equal. # # Information about the commits which are being pushed is supplied as lines to # the standard input in the form: # # # # This sample shows how to prevent push of commits where the log message starts # with "WIP" (work in progress). remote="$1" url="$2" zero=$(git hash-object --stdin &2 "Found WIP commit in $local_ref, not pushing" exit 1 fi fi done exit 0 ================================================ FILE: .git/hooks/pre-rebase.sample ================================================ #!/bin/sh # # Copyright (c) 2006, 2008 Junio C Hamano # # The "pre-rebase" hook is run just before "git rebase" starts doing # its job, and can prevent the command from running by exiting with # non-zero status. # # The hook is called with the following parameters: # # $1 -- the upstream the series was forked from. # $2 -- the branch being rebased (or empty when rebasing the current branch). # # This sample shows how to prevent topic branches that are already # merged to 'next' branch from getting rebased, because allowing it # would result in rebasing already published history. publish=next basebranch="$1" if test "$#" = 2 then topic="refs/heads/$2" else topic=`git symbolic-ref HEAD` || exit 0 ;# we do not interrupt rebasing detached HEAD fi case "$topic" in refs/heads/??/*) ;; *) exit 0 ;# we do not interrupt others. ;; esac # Now we are dealing with a topic branch being rebased # on top of master. Is it OK to rebase it? # Does the topic really exist? git show-ref -q "$topic" || { echo >&2 "No such branch $topic" exit 1 } # Is topic fully merged to master? 
not_in_master=`git rev-list --pretty=oneline ^master "$topic"` if test -z "$not_in_master" then echo >&2 "$topic is fully merged to master; better remove it." exit 1 ;# we could allow it, but there is no point. fi # Is topic ever merged to next? If so you should not be rebasing it. only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` only_next_2=`git rev-list ^master ${publish} | sort` if test "$only_next_1" = "$only_next_2" then not_in_topic=`git rev-list "^$topic" master` if test -z "$not_in_topic" then echo >&2 "$topic is already up to date with master" exit 1 ;# we could allow it, but there is no point. else exit 0 fi else not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` /usr/bin/perl -e ' my $topic = $ARGV[0]; my $msg = "* $topic has commits already merged to public branch:\n"; my (%not_in_next) = map { /^([0-9a-f]+) /; ($1 => 1); } split(/\n/, $ARGV[1]); for my $elem (map { /^([0-9a-f]+) (.*)$/; [$1 => $2]; } split(/\n/, $ARGV[2])) { if (!exists $not_in_next{$elem->[0]}) { if ($msg) { print STDERR $msg; undef $msg; } print STDERR " $elem->[1]\n"; } } ' "$topic" "$not_in_next" "$not_in_master" exit 1 fi <<\DOC_END This sample hook safeguards topic branches that have been published from being rewound. The workflow assumed here is: * Once a topic branch forks from "master", "master" is never merged into it again (either directly or indirectly). * Once a topic branch is fully cooked and merged into "master", it is deleted. If you need to build on top of it to correct earlier mistakes, a new topic branch is created by forking at the tip of the "master". This is not strictly necessary, but it makes it easier to keep your history simple. * Whenever you need to test or publish your changes to topic branches, merge them into "next" branch. The script, being an example, hardcodes the publish branch name to be "next", but it is trivial to make it configurable via $GIT_DIR/config mechanism. With this workflow, you would want to know: (1) ... 
if a topic branch has ever been merged to "next". Young topic branches can have stupid mistakes you would rather clean up before publishing, and things that have not been merged into other branches can be easily rebased without affecting other people. But once it is published, you would not want to rewind it. (2) ... if a topic branch has been fully merged to "master". Then you can delete it. More importantly, you should not build on top of it -- other people may already want to change things related to the topic as patches against your "master", so if you need further changes, it is better to fork the topic (perhaps with the same name) afresh from the tip of "master". Let's look at this example: o---o---o---o---o---o---o---o---o---o "next" / / / / / a---a---b A / / / / / / / / c---c---c---c B / / / / \ / / / / b---b C \ / / / / / \ / ---o---o---o---o---o---o---o---o---o---o---o "master" A, B and C are topic branches. * A has one fix since it was merged up to "next". * B has finished. It has been fully merged up to "master" and "next", and is ready to be deleted. * C has not merged to "next" at all. We would want to allow C to be rebased, refuse A, and encourage B to be deleted. To compute (1): git rev-list ^master ^topic next git rev-list ^master next if these match, topic has not merged in next at all. To compute (2): git rev-list master..topic if this is empty, it is fully merged to "master". DOC_END ================================================ FILE: .git/hooks/pre-receive.sample ================================================ #!/bin/sh # # An example hook script to make use of push options. # The example simply echoes all push options that start with 'echoback=' # and rejects all pushes when the "reject" push option is used. # # To enable this hook, rename this file to "pre-receive". 
if test -n "$GIT_PUSH_OPTION_COUNT" then i=0 while test "$i" -lt "$GIT_PUSH_OPTION_COUNT" do eval "value=\$GIT_PUSH_OPTION_$i" case "$value" in echoback=*) echo "echo from the pre-receive-hook: ${value#*=}" >&2 ;; reject) exit 1 esac i=$((i + 1)) done fi ================================================ FILE: .git/hooks/prepare-commit-msg.sample ================================================ #!/bin/sh # # An example hook script to prepare the commit log message. # Called by "git commit" with the name of the file that has the # commit message, followed by the description of the commit # message's source. The hook's purpose is to edit the commit # message file. If the hook fails with a non-zero status, # the commit is aborted. # # To enable this hook, rename this file to "prepare-commit-msg". # This hook includes three examples. The first one removes the # "# Please enter the commit message..." help message. # # The second includes the output of "git diff --name-status -r" # into the message, just before the "git status" output. It is # commented because it doesn't cope with --amend or with squashed # commits. # # The third example adds a Signed-off-by line to the message, that can # still be edited. This is rarely a good idea. COMMIT_MSG_FILE=$1 COMMIT_SOURCE=$2 SHA1=$3 /usr/bin/perl -i.bak -ne 'print unless(m/^. Please enter the commit message/..m/^#$/)' "$COMMIT_MSG_FILE" # case "$COMMIT_SOURCE,$SHA1" in # ,|template,) # /usr/bin/perl -i.bak -pe ' # print "\n" . 
`git diff --cached --name-status -r` # if /^#/ && $first++ == 0' "$COMMIT_MSG_FILE" ;; # *) ;; # esac # SOB=$(git var GIT_COMMITTER_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') # git interpret-trailers --in-place --trailer "$SOB" "$COMMIT_MSG_FILE" # if test -z "$COMMIT_SOURCE" # then # /usr/bin/perl -i.bak -pe 'print "\n" if !$first_line++' "$COMMIT_MSG_FILE" # fi ================================================ FILE: .git/hooks/push-to-checkout.sample ================================================ #!/bin/sh # An example hook script to update a checked-out tree on a git push. # # This hook is invoked by git-receive-pack(1) when it reacts to git # push and updates reference(s) in its repository, and when the push # tries to update the branch that is currently checked out and the # receive.denyCurrentBranch configuration variable is set to # updateInstead. # # By default, such a push is refused if the working tree and the index # of the remote repository has any difference from the currently # checked out commit; when both the working tree and the index match # the current commit, they are updated to match the newly pushed tip # of the branch. This hook is to be used to override the default # behaviour; however the code below reimplements the default behaviour # as a starting point for convenient modification. # # The hook receives the commit with which the tip of the current # branch is going to be updated: commit=$1 # It can exit with a non-zero status to refuse the push (when it does # so, it must not modify the index or the working tree). die () { echo >&2 "$*" exit 1 } # Or it can make any necessary changes to the working tree and to the # index to bring them to the desired state when the tip of the current # branch is updated to the new commit, and exit with a zero status. 
# # For example, the hook can simply run git read-tree -u -m HEAD "$1" # in order to emulate git fetch that is run in the reverse direction # with git push, as the two-tree form of git read-tree -u -m is # essentially the same as git switch or git checkout that switches # branches while keeping the local changes in the working tree that do # not interfere with the difference between the branches. # The below is a more-or-less exact translation to shell of the C code # for the default behaviour for git's push-to-checkout hook defined in # the push_to_deploy() function in builtin/receive-pack.c. # # Note that the hook will be executed from the repository directory, # not from the working tree, so if you want to perform operations on # the working tree, you will have to adapt your code accordingly, e.g. # by adding "cd .." or using relative paths. if ! git update-index -q --ignore-submodules --refresh then die "Up-to-date check failed" fi if ! git diff-files --quiet --ignore-submodules -- then die "Working directory has unstaged changes" fi # This is a rough translation of: # # head_has_history() ? "HEAD" : EMPTY_TREE_SHA1_HEX if git cat-file -e HEAD 2>/dev/null then head=HEAD else head=$(git hash-object -t tree --stdin &2 exit 1 } unset GIT_DIR GIT_WORK_TREE cd "$worktree" && if grep -q "^diff --git " "$1" then validate_patch "$1" else validate_cover_letter "$1" fi && if test "$GIT_SENDEMAIL_FILE_COUNTER" = "$GIT_SENDEMAIL_FILE_TOTAL" then git config --unset-all sendemail.validateWorktree && trap 'git worktree remove -ff "$worktree"' EXIT && validate_series fi ================================================ FILE: .git/hooks/update.sample ================================================ #!/bin/sh # # An example hook script to block unannotated tags from entering. # Called by "git receive-pack" with arguments: refname sha1-old sha1-new # # To enable this hook, rename this file to "update". 
# # Config # ------ # hooks.allowunannotated # This boolean sets whether unannotated tags will be allowed into the # repository. By default they won't be. # hooks.allowdeletetag # This boolean sets whether deleting tags will be allowed in the # repository. By default they won't be. # hooks.allowmodifytag # This boolean sets whether a tag may be modified after creation. By default # it won't be. # hooks.allowdeletebranch # This boolean sets whether deleting branches will be allowed in the # repository. By default they won't be. # hooks.denycreatebranch # This boolean sets whether remotely creating branches will be denied # in the repository. By default this is allowed. # # --- Command line refname="$1" oldrev="$2" newrev="$3" # --- Safety check if [ -z "$GIT_DIR" ]; then echo "Don't run this script from the command line." >&2 echo " (if you want, you could supply GIT_DIR then run" >&2 echo " $0 )" >&2 exit 1 fi if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then echo "usage: $0 " >&2 exit 1 fi # --- Config allowunannotated=$(git config --type=bool hooks.allowunannotated) allowdeletebranch=$(git config --type=bool hooks.allowdeletebranch) denycreatebranch=$(git config --type=bool hooks.denycreatebranch) allowdeletetag=$(git config --type=bool hooks.allowdeletetag) allowmodifytag=$(git config --type=bool hooks.allowmodifytag) # check for no description projectdesc=$(sed -e '1q' "$GIT_DIR/description") case "$projectdesc" in "Unnamed repository"* | "") echo "*** Project description file hasn't been set" >&2 exit 1 ;; esac # --- Check types # if $newrev is 0000...0000, it's a commit to delete a ref. zero=$(git hash-object --stdin &2 echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." 
>&2 exit 1 fi ;; refs/tags/*,delete) # delete tag if [ "$allowdeletetag" != "true" ]; then echo "*** Deleting a tag is not allowed in this repository" >&2 exit 1 fi ;; refs/tags/*,tag) # annotated tag if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1 then echo "*** Tag '$refname' already exists." >&2 echo "*** Modifying a tag is not allowed in this repository." >&2 exit 1 fi ;; refs/heads/*,commit) # branch if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then echo "*** Creating a branch is not allowed in this repository" >&2 exit 1 fi ;; refs/heads/*,delete) # delete branch if [ "$allowdeletebranch" != "true" ]; then echo "*** Deleting a branch is not allowed in this repository" >&2 exit 1 fi ;; refs/remotes/*,commit) # tracking branch ;; refs/remotes/*,delete) # delete tracking branch if [ "$allowdeletebranch" != "true" ]; then echo "*** Deleting a tracking branch is not allowed in this repository" >&2 exit 1 fi ;; *) # Anything else (is there anything else?) echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 exit 1 ;; esac # --- Finished exit 0 ================================================ FILE: .git/info/exclude ================================================ # git ls-files --others --exclude-from=.git/info/exclude # Lines that start with '#' are comments. 
# For a project mostly in C, the following would be a good set of # exclude patterns (uncomment them if you want to use them): # *.[oa] # *~ ================================================ FILE: .git/logs/HEAD ================================================ 0000000000000000000000000000000000000000 28f1b349b4da090a4cf04af91adf1821ca7fd687 appuser 1776671609 +0000 clone: from https://github.com/brexhq/substation ================================================ FILE: .git/logs/refs/heads/main ================================================ 0000000000000000000000000000000000000000 28f1b349b4da090a4cf04af91adf1821ca7fd687 appuser 1776671609 +0000 clone: from https://github.com/brexhq/substation ================================================ FILE: .git/logs/refs/remotes/origin/HEAD ================================================ 0000000000000000000000000000000000000000 28f1b349b4da090a4cf04af91adf1821ca7fd687 appuser 1776671609 +0000 clone: from https://github.com/brexhq/substation ================================================ FILE: .git/objects/pack/pack-1c76da16733430db3ab67c8402e095f5f4fcf914.promisor ================================================ ================================================ FILE: .git/objects/pack/pack-e1e73a8714333d259732152371bb7e014c5153a6.promisor ================================================ 28f1b349b4da090a4cf04af91adf1821ca7fd687 refs/heads/main ================================================ FILE: .git/packed-refs ================================================ # pack-refs with: peeled fully-peeled sorted 28f1b349b4da090a4cf04af91adf1821ca7fd687 refs/remotes/origin/main ================================================ FILE: .git/refs/heads/main ================================================ 28f1b349b4da090a4cf04af91adf1821ca7fd687 ================================================ FILE: .git/refs/remotes/origin/HEAD ================================================ ref: refs/remotes/origin/main 
================================================ FILE: .git/shallow ================================================ 28f1b349b4da090a4cf04af91adf1821ca7fd687 ================================================ FILE: .github/ISSUE_TEMPLATE/adopters.yaml ================================================ name: Become an Adopter description: Add the name of your organization to the list of adopters. title: '[organization] has adopted Substation!' body: - type: markdown attributes: value: | Thank you for supporting Substation! By adding your organization to the list of adopters, you help raise awareness for the project and grow our community of users. Please fill out the information below to be added to the [list of adopters](https://github.com/brexhq/substation/blob/main/ADOPTERS.md). - type: input id: org-name attributes: label: Organization Name description: Name of your organization. placeholder: ex. Acme Corp validations: required: true - type: input id: org-url attributes: label: Organization Website description: Link to your organization's website. placeholder: ex. https://www.example.com validations: required: true - type: dropdown id: stage attributes: label: Stage of Adoption description: What is your current stage of adoption? options: - We're learning about Substation - We're testing Substation - We're using Substation in production - We're driving broad adoption of Substation default: 0 validations: required: true - type: textarea id: use-case attributes: label: Description of Use description: Write one or two sentences about how your organization is using Substation. validations: required: true ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. 
Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Build (please complete the following information):** - Architecture: [e.g. amd64] - Version: [e.g. 0.y.z] **Cloud (please complete the following information):** - Provider: [e.g. AWS] - Service: [e.g. Kinesis, Lambda] **Additional context** Add any other context about the problem here. ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: '' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. ================================================ FILE: .github/pull_request_template.md ================================================ ## Description ## Motivation and Context ## How Has This Been Tested? ## Types of changes * [ ] Bug fix (non-breaking change which fixes an issue) * [ ] New feature (non-breaking change which adds functionality) * [ ] Breaking change (fix or feature that would cause existing functionality to change) ## Checklist: * [ ] My code follows the code style of this project. * [ ] My change requires a change to the documentation. * [ ] I have updated the documentation accordingly. 
================================================ FILE: .github/workflows/code.yml ================================================ name: code on: pull_request: branches: [main] permissions: contents: read jobs: go: permissions: contents: read # fetch code pull-requests: read # fetch pull requests runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 1 - name: Setup Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.23 - name: Testing run: go test -timeout 30s -v ./... - name: Linting uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: version: v1.61.0 # Inherits workflow permissions. python: runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Linting uses: psf/black@1b2427a2b785cc4aac97c19bb4b9a0de063f9547 # v24.10.0 with: # This recursively scans the entire project. Note that `exclude` must be # an empty string: "An empty value means no paths are excluded." options: "--check --exclude=''" # Inherits workflow permissions. jsonnet: runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 1 - name: Setup Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.23 - name: Compiling run: | go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0 sh .github/workflows/code_jsonnet.sh ================================================ FILE: .github/workflows/code_jsonnet.sh ================================================ #!/bin/sh files=$(find . -name "*.jsonnet") for file in $files do # 'rev | cut | rev' converts "path/to/file.jsonnet" to "path/to/file.json" f=$(echo $file | rev | cut -c 4- | rev) # This is run from the root of the repo. 
jsonnet --ext-code-file sub="./substation.libsonnet" $file > $f done ================================================ FILE: .github/workflows/conventional_commits.yml ================================================ name: conventional_commits on: pull_request_target: types: - opened - edited - synchronize permissions: contents: read jobs: conventional_commits: permissions: pull-requests: read # analyze PRs statuses: write # update status of analyzed PR runs-on: ubuntu-latest steps: - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5.5.3 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/release_please.yml ================================================ name: release_please on: push: branches: - main permissions: contents: read jobs: release_please: permissions: contents: write # create release commit pull-requests: write # create release PR runs-on: ubuntu-latest steps: - name: Tag Release id: release uses: googleapis/release-please-action@7987652d64b4581673a76e33ad5e98e3dd56832f # v4.1.3 with: release-type: go package-name: release-please-action - name: Checkout Repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 - name: Setup Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22 - name: Upload Release Artifact if: github.event_name == 'release' && github.event.prerelease == false uses: goreleaser/goreleaser-action@9ed2f89a662bf1735a48bc8557fd212fa902bebf # v6.1.0 with: version: latest args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} TAG: ${{ steps.release.outputs.tag_name }} ================================================ FILE: .github/workflows/scorecard.yml ================================================ # This workflow uses actions that are not certified by GitHub. 
They are provided # by a third-party and are governed by separate terms of service, privacy # policy, and support documentation. name: Scorecard supply-chain security on: # For Branch-Protection check. Only the default branch is supported. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection branch_protection_rule: # To guarantee Maintained check is occasionally updated. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained schedule: - cron: "17 17 * * 2" push: branches: ["main"] # Declare default permissions as read only. permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge (see publish_results below). id-token: write # Uncomment the permissions below if installing in a private repository. # contents: read # actions: read steps: - name: "Checkout code" uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: persist-credentials: false - name: "Run analysis" uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: # - you want to enable the Branch-Protection check on a *public* repository, or # - you are installing Scorecard on a *private* repository # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. # repo_token: ${{ secrets.SCORECARD_TOKEN }} # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers # - Allows the repository to include the Scorecard badge. # - See https://github.com/ossf/scorecard-action#publishing-results. # For private repositories: # - `publish_results` will always be set to `false`, regardless # of the value entered here. 
publish_results: true # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 with: name: sarif-results path: results.sarif retention-days: 5 # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" uses: github/codeql-action/upload-sarif@cf5b0a9041d3c1d336516f1944c96d96598193cc # v2.19.1 with: sarif_file: results.sarif ================================================ FILE: .gitignore ================================================ # Ignore all * # Unignore all with extensions !*.* # Unignore all dirs !*/ # CHANGELOG is handled by Release Please CHANGELOG.md # ignore all JSON by default *.json *.jsonl # allow Visual Studio Code devcontainer config file !.devcontainer/devcontainer.json # allow Visual Studio Code settings config file !.vscode/settings.json # allow specific JSON files in the examples/ directory !examples/**/data*.json !examples/**/data*.jsonl !examples/**/stdout.jsonl # Go profiling files *.prof # Ignore macOS system files .DS_Store # Archive files *.zip # Terraform *.terraform* terraform.tfstate* # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python src/build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a Python script from a template # before PyInstaller builds the exe, so as to inject date/other info into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # PEP 582; used by e.g. github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ ================================================ FILE: .golangci.yml ================================================ # This code is licensed under the terms of the MIT license. ## Config for golangci-lint v1.49.0 based on https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322 run: # Timeout for analysis, e.g. 30s, 5m. # Default: 1m timeout: 3m # This file contains only configs which differ from defaults. # All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml linters-settings: cyclop: # The maximal code complexity to report. 
# Default: 10 max-complexity: 30 errcheck: # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. # Such cases aren't reported by default. # Default: false check-type-assertions: true gocognit: # Minimal code complexity to report # Default: 30 min-complexity: 45 # gomodguard: # blocked: # # List of blocked modules. # # Default: [] # modules: # - github.com/golang/protobuf: # recommendations: # - google.golang.org/protobuf # reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" nakedret: # Make an issue if func has more lines of code than this setting, and it has naked returns. # Default: 30 max-func-lines: 0 nolintlint: # Exclude following linters from requiring an explanation. # Default: [] allow-no-explanation: [ gocognit ] # Enable to require an explanation of nonzero length after each nolint directive. # Default: false require-explanation: true # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true rowserrcheck: # database/sql is always checked # Default: [] packages: - github.com/jmoiron/sqlx tenv: # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. 
# Default: false all: true linters: disable-all: true enable: ## enabled by default - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases - gosimple # specializes in simplifying a code - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - ineffassign # detects when assignments to existing variables are not used - staticcheck # is a go vet on steroids, applying a ton of static analysis checks - typecheck # like the front-end of a Go compiler, parses and type-checks Go code - unused # checks for unused constants, variables, functions and types ## disabled by default - bidichk # checks for dangerous unicode character sequences - bodyclose # checks whether HTTP response body is closed successfully - cyclop # checks function and package cyclomatic complexity - durationcheck # checks for two durations multiplied together - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error - execinquery # checks query string in Query function which reads your Go src files and warning it finds - exhaustive # checks exhaustiveness of enum switch statements - gocognit # computes and checks the cognitive complexity of functions - gocyclo # computes and checks the cyclomatic complexity of functions - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod - gomodguard # allow and block lists linter for direct Go module dependencies. 
This is different from depguard where there are different block types for example version constraints and module recommendations - goprintffuncname # checks that printf-like functions are named with f at the end - nakedret # finds naked returns in functions greater than a specified function length - nestif # reports deeply nested if statements - nilerr # finds the code that returns nil even if it checks that the error is not nil - noctx # finds sending http request without context.Context - nolintlint # reports ill-formed or insufficient nolint directives - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL - predeclared # finds code that shadows one of Go's predeclared identifiers - reassign # checks that package variables are not reassigned - rowserrcheck # checks whether Err of rows is checked successfully - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed - tenv # detects using os.Setenv instead of t.Setenv since Go1.17 - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - unconvert # removes unnecessary type conversions - unparam # reports unused function parameters - usestdlibvars # detects the possibility to use variables/constants from the Go standard library - wastedassign # finds wasted assignment statements - whitespace # detects leading and trailing whitespace - misspell # finds commonly misspelled English words in comments issues: # Maximum count of issues with the same text. # Set to 0 to disable. 
# Default: 3 max-same-issues: 50 exclude: - 'declaration of "(err|ctx)" shadows declaration at' exclude-rules: - source: "^//\\s*go:generate\\s" linters: [ lll ] - source: "(noinspection|TODO)" linters: [ godot ] - source: "//noinspection" linters: [ gocritic ] - source: "^\\s+if _, ok := err\\.\\([^.]+\\.InternalError\\); ok {" linters: [ errorlint ] - path: "_test\\.go" linters: - bodyclose - dupl - funlen - goconst - gosec - noctx - wrapcheck ================================================ FILE: .goreleaser.yaml ================================================ version: 2 before: hooks: - go mod tidy builds: - env: - CGO_ENABLED=0 goos: - linux - darwin goarch: - amd64 - arm64 main: ./cmd/substation binary: substation release: mode: keep-existing # required for compatibility with release-please archives: - format: tar.gz # this name template makes the OS and Arch compatible with the results of `uname`. name_template: >- {{ .ProjectName }}_ {{- .Os }}_ {{- if eq .Arch "amd64" }}x86_64 {{- else if eq .Arch "386" }}i386 {{- else }}{{ .Arch }}{{ end }} {{- if .Arm }}v{{ .Arm }}{{ end }} changelog: sort: asc filters: exclude: - "^docs:" - "^test:" ================================================ FILE: .vscode/settings.json ================================================ { "terminal.integrated.defaultProfile.linux": "bash", "go.useLanguageServer": true, "gopls": { "formatting.gofumpt": true, }, "go.formatTool": "gofumpt", "go.inferGopath": false, "go.lintOnSave": "workspace", "go.lintTool": "golangci-lint", "cSpell.enabled": false } ================================================ FILE: ADOPTERS.md ================================================ # Adopters If you're using Substation in your organization, please try to add your company name to this list. 
By [adding your name to this list](https://github.com/brexhq/substation/issues/new?assignees=&labels=&projects=&template=adopters.yaml&title=%5Borganization%5D+has+adopted+Substation%21), you help raise awareness for the project and grow our community of users! | Organization | Contact | Description of Use | |--------------|---------|--------------------| | [Brex](https://www.brex.com) | [@jshlbrd](https://github.com/jshlbrd) | All security event and audit logs (~5 TB/day) used by the security org are handled by Substation. | | [Verkada](https://www.verkada.com) | [@chencaoverkada](https://github.com/chencaoverkada) | Substation enriches and normalizes **all** of Verkada's logging pipelines, and filters logs before they are ingested into an in-house SIEM. | ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [INSERT CONTACT METHOD]. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. 
Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to Substation Thank you so much for your interest in contributing to Substation! This document contains guidelines to follow when contributing to the project. 
## Table Of Contents [Code of Conduct](#code-of-conduct) [Submissions](#submissions) + [Changes](#submitting-changes) + [Bugs](#submitting-bugs) + [Enhancements](#submitting-enhancements) [Development](#development) + [Development Environment](#development-environment) + [Conditions](#conditions) + [Transforms](#transforms) + [Testing](#testing) + [Config Unit Tests](#config-unit-tests) [Style Guides](#style-guides) + [Design Patterns](#design-patterns) + [Naming Conventions](#naming-conventions) + [Go](#go-style-guide) + [Python](#python-style-guide) ## Code of Conduct The Code of Conduct can be reviewed [here](CODE_OF_CONDUCT.md). ## Submissions ### Submitting Changes Pull requests should be submitted using the pull request template. Changes will be validated through automation and by the project maintainers before merging to main. ### Submitting Bugs Bugs should be submitted as issues using the issue template. ### Submitting Enhancements Enhancements should be submitted as issues using the issue template. ## Development ### Development Environment The project supports development through the use of [Visual Studio Code configurations](https://code.visualstudio.com/docs/remote/containers). The VS Code [development container](.devcontainer/Dockerfile) contains all packages required to develop and test changes locally before submitting pull requests. ### [Conditions](condition/) Each condition should be functional and solve a single problem, and each one is nested under a "family" of conditions. (We may ask that you split complex condition logic into multiple conditions.) 
For example, there is a family for string comparisons: - Equal To (`cnd.string.equal_to`, `cnd.str.eq`) - Starts With (`cnd.string.starts_with`, `cnd.str.prefix`) - Ends With (`cnd.string.ends_with`, `cnd.str.suffix`) - Contains (`cnd.string.contains`, `cnd.str.has`) - Match (regular expression) (`cnd.string.match`) - Greater Than (`cnd.string.greater_than`, `cnd.str.gt`) - Less Than (`cnd.string.less_than`, `cnd.str.lt`) Conditions may require changes to the [configuration library](substation.libsonnet) (usually when adding features or making breaking changes). For new conditions, we typically ask that you add a new [example](examples/) that uses a config unit test. Conditions may reuse these field structures: - `object`: For reading from JSON objects. In some cases, we may ask you to rename fields for consistency. ### [Transforms](transform/) Each transform should be functional and solve a single problem, and each one is nested under a "family" of transforms. (We may ask that you split complex transform logic into multiple transforms.) For example, there is a family for JSON object operations: - Copy (`tf.object.copy`, `tf.obj.cp`) - Delete (`tf.object.delete`, `tf.obj.del`) - Insert (`tf.object.insert`) - To Boolean (`tf.object.to.boolean`, `tf.obj.to.bool`) - To String (`tf.object.to.string`, `tf.obj.to.str`) - To Float (`tf.object.to.float`) - To Integer (`tf.object.to.integer`, `tf.obj.to.int`) - To Unsigned Integer (`tf.object.to.unsigned_integer`, `tf.obj.to.uint`) Transforms may require changes to the [configuration library](substation.libsonnet) (usually when adding features or making breaking changes). For new transforms, we typically ask that you add a new [example](examples/) that uses a config unit test. Transforms may reuse these field structures: - `id`: For uniquely identifying a transform. (If not configured, then this is automatically generated when a configuration is compiled by Jsonnet.) - `object`: For reading from and writing to JSON objects. 
- `batch`: For stateful collection of multiple messages in a transform. - `transforms`: For chaining multiple transforms together. (Used in `meta` transforms.) - `aux_transforms`: For chaining multiple transforms together, _after_ the primary transform has executed. (Used in `send` transforms.) In some cases, we may ask you to rename fields for consistency. ### Testing We rely on contributors to test changes before they are submitted as pull requests. Any components added or changed should be tested and public packages should be supported by unit tests. #### Config Unit Tests Configuration examples should use config unit tests to demo new concepts or features, like this: ```jsonnet { tests: [ { // Every test should have a unique name. name: 'my-passing-test', // Generates the test message '{"a": true}' which // is run through the configured transforms and // then checked against the condition. transforms: [ sub.tf.test.message({ value: {a: true} }), ], // Checks if key 'x' == 'true'. condition: sub.cnd.all([ sub.cnd.str.eq({ object: {source_key: 'x'}, value: 'true' }), ]) }, ], // These transforms process the test message and the result // is checked against the condition. transforms: [ // Copies the value of key 'a' to key 'x'. sub.tf.obj.cp({ object: { source_key: 'a', target_key: 'x' } }), ], } ``` ## Style Guides ### Design Patterns #### Environment Variables Applications may implement runtime settings that are managed by environment variables. For example, the [AWS Lambda application](/cmd/aws/lambda/substation/) uses `SUBSTATION_LAMBDA_HANDLER` to manage [invocation settings](https://docs.aws.amazon.com/lambda/latest/dg/lambda-invocation.html). These should reference the application by name, if possible. #### Configurations Substation uses a single configuration pattern for all components in the system (see `Config` in [config/config.go](/config/config.go)). This pattern is highly reusable and should be embedded to create custom configurations. 
Below is an example that shows how configurations should be designed: ```json "foo": { "settings": { ... }, "type": "fooer" }, "bar": { "settings": { "baz": [ { "settings": { ... }, "type": "bazar" }, ] }, "type": "barre" } ``` Repeating this pattern allows components and applications to integrate with Substation's factory patterns. #### Factories Substation relies on [factory methods](https://refactoring.guru/design-patterns/factory-method) to create objects that [satisfy interfaces](https://go.dev/doc/effective_go#interface_methods) across the project. Factories should be combined with the configuration design pattern to create new components. Factories are the preferred method for allowing users to customize the system. Example factories can be seen in [condition](/condition/condition.go) and [transform](/transform/transform.go). #### Reading and Writing Streaming Data We prefer to use the io package for reading (e.g., io.Reader) and writing (e.g., io.Writer) streams of data. This reduces memory usage and decreases the likelihood that we will need to refactor methods and functions that handle streaming data. Substation commonly uses these io compatible containers: - open files are created by calling `os.CreateTemp("", "substation")` - bytes buffers are created by calling `new(bytes.Buffer)` ### Naming Conventions #### Breaking Changes Any change that modifies the public API of Go packages and applications is a breaking change, and any source code that has non-obvious impact on the public API should be tagged with `BREAKING CHANGE` in a comment. #### Errors Errors should always start with `err` (or `Err`, if they are public). Commonly used errors are defined in [internal/errors.go](internal/errors.go). If the error is related to a specific component, then the component name should be included in the error. For example, if the error is related to the `Foo` component, then the error should be named `errFooShortDescription`. 
#### Environment Variables Environment variable keys and values specific to the Substation application should always use SCREAMING_SNAKE_CASE. If the key or value refers to a cloud service provider, then it should always directly refer to that provider (for example, AWS_API_GATEWAY). Any environment variable that changes a default runtime setting should always start with SUBSTATION (for example, SUBSTATION_CONCURRENCY). #### Application Variables Variable names should always follow conventions from [Effective Go](https://go.dev/doc/effective_go#names), the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments#variable-names) and avoid [predeclared identifiers](https://go.dev/ref/spec#Predeclared_identifiers). #### Source Metadata Sources that [add metadata during message creation](/message/) should use lowerCamelCase for their JSON keys. #### Package Configurations Configurations for packages (for example, conditions and transforms) should always use lower_snake_case in their JSON keys. This helps maintain readability when reviewing large configuration files. We strongly urge everyone to use Jsonnet for managing configurations. ### Go Style Guide Go code should follow [Effective Go](https://go.dev/doc/effective_go) as a baseline. ### Python Style Guide Python code should follow [Google's Python Style Guide](https://google.github.io/styleguide/pyguide.html) as a baseline. ================================================ FILE: MIGRATION.md ================================================ # Migration Use this as a guide for migrating between major versions of Substation. ## v2.0.0 ### Applications (cmd/) #### AWS Lambda Handlers Multiple AWS Lambda handlers were renamed to better reflect the AWS service they interact with: - Renamed `AWS_KINESIS_DATA_FIREHOSE` to `AWS_DATA_FIREHOSE`. - Renamed `AWS_KINESIS` to `AWS_KINESIS_DATA_STREAM`. - Renamed `AWS_DYNAMODB` to `AWS_DYNAMODB_STREAM`. 
v1.x.x: ```hcl module "node" { source = "build/terraform/aws/lambda" config = { name = "node" description = "Substation node that is invoked by a Kinesis Data Stream." image_uri = "123456789012.dkr.ecr.us-east-1.amazonaws.com/substation:v1.0.0" image_arm = true env = { "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS" } } } ``` v2.x.x: ```hcl module "node" { source = "build/terraform/aws/lambda" config = { name = "node" description = "Substation node that is invoked by a Kinesis Data Stream." image_uri = "123456789012.dkr.ecr.us-east-1.amazonaws.com/substation:v2.0.0" image_arm = true env = { "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" } } } ``` ### Conditions (condition/) #### Conditioner Interface The `Inspector` interface was renamed to `Conditioner` to standardize the naming convention used across the project. #### `meta.condition` Condition This is replaced by the `meta.all`, `meta.any`, and `meta.none` conditions. v1.x.x: ```jsonnet sub.cnd.all([ sub.cnd.str.eq({ value: 'FOO' }), sub.cnd.meta.condition({ condition: sub.cnd.any([ sub.cnd.str.eq({ value: 'BAR' }), sub.cnd.str.eq({ value: 'BAZ' }), ]) }), ]), ``` v2.x.x: ```jsonnet sub.cnd.all([ sub.cnd.str.eq({ value: 'FOO' }), sub.cnd.any([ sub.cnd.str.eq({ value: 'BAR' }), sub.cnd.str.eq({ value: 'BAZ' }), ]), ]), ``` #### `meta.for_each` Condition This is replaced by the `meta.all`, `meta.any`, and `meta.none` conditions. If the `object.source_key` value is an array, then the data is treated as a list of elements. 
v1.x.x: ```jsonnet sub.cnd.meta.for_each({ object: { source_key: 'field' }, type: 'any', inspector: sub.cnd.str.eq({ value: 'FOO' }), }) ``` v2.x.x: ```jsonnet sub.cnd.meta.any({ object: { source_key: 'field' }, conditions: [ sub.cnd.str.eq({ value: 'FOO' }) ], }) ``` #### `meta.negate` Condition This is replaced by the `meta.none` Condition. v1.x.x: ```jsonnet sub.cnd.meta.negate({ inspector: sub.cnd.str.eq({ value: 'FOO' }) }) ``` v2.x.x: ```jsonnet sub.cnd.meta.none({ conditions: [ sub.cnd.str.eq({ value: 'FOO' }) ] }) ``` ```jsonnet sub.cnd.none([ sub.cnd.str.eq({ value: 'FOO' }) ]) ``` #### `meta.err` Condition This is removed and was not replaced. Remove any references to this inspector. ### Transforms (transforms) #### `send.aws.*` Transforms The AWS resource fields were replaced by an `aws` object field that contains the sub-fields `arn` and `assume_role_arn`. The region for each AWS client is derived from either the resource ARN or assumed role ARN. v1.x.x: ```jsonnet sub.tf.send.aws.s3({ bucket_name: 'substation', file_path: { time_format: '2006/01/02/15', uuid: true, suffix: '.json' }, }), ``` v2.x.x: ```jsonnet sub.tf.send.aws.s3({ aws: { arn: 'arn:aws:s3:::substation' }, file_path: { time_format: '2006/01/02/15', uuid: true, suffix: '.json' }, }), ``` **NOTE: This change also applies to every configuration that relies on an AWS resource.** #### `meta.*` Transforms The `transform` field is removed from all transforms and was replaced with the `transforms` field. 
v1.x.x: ```jsonnet sub.tf.meta.switch({ cases: [ { condition: sub.cnd.all([ sub.cnd.str.eq({ obj: { source_key: 'field' }, value: 'FOO' }), ]), transform: sub.tf.obj.insert({ object: { target_key: 'field' }, value: 'BAR' }), }, ]}) ``` v2.x.x: ```jsonnet sub.tf.meta.switch({ cases: [ { condition: sub.cnd.str.eq({ obj: { source_key: 'field' }, value: 'FOO' }), transforms: [ sub.tf.obj.insert({ object: { target_key: 'field' }, value: 'BAR' }) ], }, ]}) ``` #### `meta.retry` Transform Retry settings were removed from all transforms and replaced by the `meta.retry` transform. It is recommended to create a reusable pattern for common retry scenarios. v1.x.x: ```jsonnet sub.tf.send.aws.sqs({ arn: 'arn:aws:sqs:us-east-1:123456789012:substation', retry: { count: 3 }, }) ``` v2.x.x: ```jsonnet sub.tf.meta.retry({ retry: { count: 3, delay: '1s' }, transforms: [ sub.tf.send.aws.sqs({ aws: { arn: 'arn:aws:sqs:us-east-1:123456789012:substation' }, }), ], }) ``` **NOTE: For AWS services, retries for the client can be configured in Terraform by using the AWS_MAX_ATTEMPTS environment variable. This is used _in addition to_ the `meta.retry` transform.** #### `meta.pipeline` Transform This is removed and was not replaced. Remove any references to this transform and replace it with the `transforms` field used in other meta transforms. #### `send.aws.dynamodb` Transform The `send.aws.dynamodb` transform was renamed to `send.aws.dynamodb.put`. v1.x.x: ```jsonnet sub.tf.send.aws.dynamodb({ table_name: 'substation', }), ``` v2.x.x: ```jsonnet sub.tf.send.aws.dynamodb.put({ aws: { arn: 'arn:aws:dynamodb:us-east-1:123456789012:table/substation' }, }), ``` #### `enrich.aws.dynamodb` Transform The `enrich.aws.dynamodb` transform was renamed to `enrich.aws.dynamodb.query`, and had these additional changes: - `PartitionKey` and `SortKey` now reference the column names in the DynamoDB table and are nested under the `Attributes` field. 
- By default, the value retrieved from `Object.SourceKey` is used as the `PartitionKey` value. If the `SortKey` is provided and the value from `Object.SourceKey` is an array, then the first element is used as the `PartitionKey` value and the second element is used as the `SortKey` value. - The `KeyConditionExpression` field was removed because this is now a derived value. v1.x.x: ```jsonnet // In v1.x.x, the DynamoDB column names must always be 'PK' and/or 'SK'. sub.tf.obj.cp({ object: { src: 'id', trg: 'meta ddb.PK' } }), sub.transform.enrich.aws.dynamodb({ object: { source_key: 'meta ddb', target_key: 'user' }, table_name: 'substation', partition_key: 'PK', key_condition_expression: 'PK = :PK', }), ``` v2.x.x: ```jsonnet sub.transform.enrich.aws.dynamodb.query({ object: { source_key: 'id', target_key: 'user' }, aws: { arn: 'arn:aws:dynamodb:us-east-1:123456789012:table/substation' }, attributes: { partition_key: 'PK', }, }), ``` #### `send.aws.kinesis_data_firehose` Transform The `send.aws.kinesis_data_firehose` transform was renamed to `send.aws.data_firehose`. v1.x.x: ```jsonnet sub.tf.send.aws.kinesis_data_firehose({ stream_name: 'substation', }), ``` v2.x.x: ```jsonnet sub.tf.send.aws.data_firehose({ aws: { arn: 'arn:aws:firehose:us-east-1:123456789012:deliverystream/substation' }, }), ``` ================================================ FILE: README.md ================================================ # Substation ![Substation Banner](.github/media/substation_banner.png)

Substation is a toolkit for routing, normalizing, and enriching security event and audit logs.

[Releases][releases]   |   [Documentation][docs]   |   [Adopters][adopters]   |   [Announcement (2022)][announcement]   |   [v1.0 Release (2024)][v1_release] [![OSSF-Scorecard Score](https://img.shields.io/ossf-scorecard/github.com/brexhq/substation?style=for-the-badge)](https://scorecard.dev/viewer/?uri=github.com/brexhq/substation) ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/brexhq/substation/code.yml?style=for-the-badge) [![GitHub Release](https://img.shields.io/github/v/release/brexhq/substation?sort=semver&style=for-the-badge&link=https%3A%2F%2Fgithub.com%2Fbrexhq%2Fsubstation%2Freleases%2Flatest)](https://github.com/brexhq/substation/releases) ![GitHub Created At](https://img.shields.io/github/created-at/brexhq/substation?style=for-the-badge&label=created) [![GitHub License](https://img.shields.io/github/license/brexhq/substation?style=for-the-badge)](https://github.com/brexhq/substation/blob/main/LICENSE)
## Quickstart Want to see a demo before diving into the documentation? Run this command: ```sh export PATH=$PATH:$(go env GOPATH)/bin && \ go install github.com/brexhq/substation/v2/cmd/substation@latest && \ substation demo ``` ## At a Glance Substation is inspired by data pipeline systems like Logstash and Fluentd, but is built for modern security teams: - **Extensible Data Processing**: Build data processing pipeline systems and microservices using out-of-the-box applications and 100+ data transformation functions, or create your own written in Go. - **Route Data Across the Cloud**: Conditionally route data to, from, and between AWS cloud services, including S3, Kinesis, SQS, and Lambda, or to any HTTP endpoint. - **Bring Your Own Schema**: Format, normalize, and enrich event logs to comply with the Elastic Common Schema (ECS), Open Cybersecurity Schema Framework (OCSF), or any other schema. - **Unlimited Data Enrichment**: Use external APIs to enrich event logs affordably and at scale with enterprise and threat intelligence, or build a microservice that reduces spend in expensive security APIs. - **No Servers, No Maintenance**: Deploys as a serverless application in your AWS account, launches in minutes using Terraform, and requires no maintenance after deployment. - **Runs Almost Anywhere**: Create applications that run on most platforms supported by Go and transform data consistently across laptops, servers, containers, and serverless functions. - **High Performance, Low Cost**: Transform 100,000+ events per second while keeping cloud costs as low as a few cents per GB. Vendor solutions, like [Cribl](https://cribl.io/cribl-pricing/) and [Datadog](https://www.datadoghq.com/pricing/?product=observability-pipelines#products), can cost up to 10x more. 
All of these data pipeline and microservice systems, and many more, can be built with Substation: ![Example Substation architectures](.github/media/substation_architecture.png) ## Transforming Event Logs Substation excels at formatting, normalizing, and enriching event logs. For example, Zeek connection logs can be transformed to comply with the Elastic Common Schema:
Original Event Transformed Event
```json { "ts": 1591367999.430166, "uid": "C5bLoe2Mvxqhawzqqd", "id.orig_h": "192.168.4.76", "id.orig_p": 46378, "id.resp_h": "31.3.245.133", "id.resp_p": 80, "proto": "tcp", "service": "http", "duration": 0.25411510467529297, "orig_bytes": 77, "resp_bytes": 295, "conn_state": "SF", "missed_bytes": 0, "history": "ShADadFf", "orig_pkts": 6, "orig_ip_bytes": 397, "resp_pkts": 4, "resp_ip_bytes": 511 } ``` ```json { "event": { "original": { "ts": 1591367999.430166, "uid": "C5bLoe2Mvxqhawzqqd", "id.orig_h": "192.168.4.76", "id.orig_p": 46378, "id.resp_h": "31.3.245.133", "id.resp_p": 80, "proto": "tcp", "service": "http", "duration": 0.25411510467529297, "orig_bytes": 77, "resp_bytes": 295, "conn_state": "SF", "missed_bytes": 0, "history": "ShADadFf", "orig_pkts": 6, "orig_ip_bytes": 397, "resp_pkts": 4, "resp_ip_bytes": 511 }, "hash": "af70ea0b38e1fb529e230d3eca6badd54cd6a080d7fcb909cac4ee0191bb788f", "created": "2022-12-30T17:20:41.027505Z", "id": "C5bLoe2Mvxqhawzqqd", "kind": "event", "category": [ "network" ], "action": "network-connection", "outcome": "success", "duration": 254115104.675293 }, "@timestamp": "2020-06-05T14:39:59.430166Z", "client": { "address": "192.168.4.76", "ip": "192.168.4.76", "port": 46378, "packets": 6, "bytes": 77 }, "server": { "address": "31.3.245.133", "ip": "31.3.245.133", "port": 80, "packets": 4, "bytes": 295, "domain": "h31-3-245-133.host.redstation.co.uk", "top_level_domain": "co.uk", "subdomain": "h31-3-245-133.host", "registered_domain": "redstation.co.uk", "as": { "number": 20860, "organization": { "name": "Iomart Cloud Services Limited" } }, "geo": { "continent_name": "Europe", "country_name": "United Kingdom", "city_name": "Manchester", "location": { "latitude": 53.5039, "longitude": -2.1959 }, "accuracy": 1000 } }, "network": { "protocol": "tcp", "bytes": 372, "packets": 10, "direction": "outbound" } } ```
## Routing Data Substation can route data to several destinations from a single process and, unlike most other data pipeline systems, data transformation and routing are functionally equivalent -- this means that data can be transformed or routed in any order. In this configuration, data is: - Written to AWS S3 - Printed to stdout - Conditionally dropped (filtered, removed) - Sent to an HTTPS endpoint ```jsonnet // The input is a JSON array of objects, such as: // [ // { "field1": "a", "field2": 1, "field3": true }, // { "field1": "b", "field2": 2, "field3": false }, // ... // ] local sub = import 'substation.libsonnet'; // This filters events based on the value of field3. local is_false = sub.cnd.str.eq({ object: { source_key: 'field3' }, value: 'false' }); { transforms: [ // Pre-transformed data is written to an object in AWS S3 for long-term storage. sub.tf.send.aws.s3({ aws: { arn: 'arn:aws:s3:::example-bucket-name' } }), // The JSON array is split into individual events that go through // the remaining transforms. Each event is printed to stdout. sub.tf.agg.from.array(), sub.tf.send.stdout(), // Events where field3 is false are removed from the pipeline. sub.pattern.tf.conditional(condition=is_false, transform=sub.tf.util.drop()), // The remaining events are sent to an HTTPS endpoint. sub.tf.send.http.post({ url: 'https://example-http-endpoint.com' }), ], } ``` Alternatively, the data can be conditionally routed to different destinations: ```jsonnet local sub = import 'substation.libsonnet'; { transforms: [ // If field3 is false, then the event is sent to an HTTPS endpoint; otherwise, // the event is written to an object in AWS S3. 
sub.tf.meta.switch({ cases: [ { condition: sub.cnd.str.eq({ object: { source_key: 'field3' }, value: 'false' }), transforms: [ sub.tf.send.http.post({ url: 'https://example-http-endpoint.com' }), ], }, { transforms: [ sub.tf.send.aws.s3({ aws: { arn: 'arn:aws:s3:::example-bucket-name' } }), ], }, ] }), // The event is always available to any remaining transforms. sub.tf.send.stdout(), ], } ``` ## Configuring Applications Substation applications run almost anywhere (laptops, servers, containers, serverless functions) and all transform functions behave identically regardless of where they are run. This makes it easy to develop configuration changes locally, validate them in a build (CI/CD) pipeline, and run integration tests in a staging environment before deploying to production. Configurations are written in Jsonnet and can be expressed as functional code, simplifying version control and making it easy to build custom data processing libraries. For power users, configurations also have abbreviations that make them easier to write. Compare the configuration below to similar configurations for Logstash and Fluentd:
Substation Logstash Fluentd
```jsonnet local sub = import 'substation.libsonnet'; { transforms: [ sub.tf.obj.cp({ object: { source_key: 'src_field_1', target_key: 'dest_field_1' } }), sub.tf.obj.cp({ obj: { src: 'src_field_2', trg: 'dest_field_2' } }), sub.tf.send.stdout(), sub.tf.send.http.post({ url: 'https://example-http-endpoint.com' }), ], } ``` ```ruby input { file { path => "/path/to/your/file.log" start_position => "beginning" sincedb_path => "/dev/null" codec => "json" } } filter { json { source => "message" } mutate { copy => { "src_field_1" => "dest_field_1" } copy => { "src_field_2" => "dest_field_2" } } } output { stdout { codec => rubydebug } http { url => "https://example-http-endpoint.com" http_method => "post" format => "json" } } ``` ```xml @type tail path /path/to/your/file.log pos_file /dev/null tag file.log format json @type record_transformer enable_ruby dest_field_1 ${record['src_field_1']} dest_field_2 ${record['src_field_2']} @type copy @type stdout @type http url https://example-http-endpoint.com http_method post @type json ```
## Deploying to AWS Substation includes Terraform modules for securely deploying data pipelines and microservices in AWS. These modules are designed for ease of use, but are also flexible enough to support managing complex systems. This configuration deploys a data pipeline that is capable of receiving data from API Gateway and storing it in an S3 bucket:
resources.tf node.tf
```tcl # These resources are deployed once and are used by all Substation infrastructure. # Substation resources can be encrypted using a customer-managed KMS key. module "kms" { source = "build/terraform/aws/kms" config = { name = "alias/substation" } } # Substation typically uses AppConfig to manage configuration files, but # configurations can also be loaded from an S3 URI or an HTTP endpoint. module "appconfig" { source = "build/terraform/aws/appconfig" config = { name = "substation" environments = [{ name = "example" }] } } module "ecr" { source = "build/terraform/aws/ecr" kms = module.kms config = { name = "substation" force_delete = true } } resource "random_uuid" "s3" {} module "s3" { source = "build/terraform/aws/s3" kms = module.kms config = { # Bucket name is randomized to avoid collisions. name = "${random_uuid.s3.result}-substation" } # Access is granted by providing the role name of a # resource. This access applies least privilege and # grants access to dependent resources, such as KMS. access = [ # Lambda functions create unique roles that are # used to access resources. module.node.role.name, ] } ``` ```tcl # Deploys an unauthenticated API Gateway that forwards data to the node. module "node_gateway" { source = "build/terraform/aws/api_gateway/lambda" lambda = module.node config = { name = "node_gateway" } depends_on = [ module.node ] } module "node" { source = "build/terraform/aws/lambda" kms = module.kms # Optional appconfig = module.appconfig # Optional config = { name = "node" description = "Substation node that writes data to S3." image_uri = "${module.ecr.url}:latest" image_arm = true env = { "SUBSTATION_CONFIG" : "https://localhost:2772/applications/substation/environments/example/configurations/node" "SUBSTATION_DEBUG" : true # This Substation node will ingest data from API Gateway. More nodes can be # deployed to ingest data from other sources, such as Kinesis or SQS. 
"SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" } } depends_on = [ module.appconfig.name, module.ecr.url, ] } ```
## Getting Started You can run Substation on: - [Docker](https://substation.readme.io/docs/try-substation-on-docker) - [macOS / Linux](https://substation.readme.io/docs/try-substation-on-macos-linux) - [AWS](https://substation.readme.io/docs/try-substation-on-aws) ### Testing Use the Substation CLI tool to run through [examples](examples/) and unit test configurations: ```sh substation test -h ``` Examples can be tested by running this command from the root of the project. For example: ```sh % substation test -R examples/transform/time/str_conversion {"time":"2024-01-01T01:02:03.123Z"} {"time":"2024-01-01T01:02:03"} ok examples/transform/time/str_conversion/config.jsonnet 133µs ``` ### Development [VS Code](https://code.visualstudio.com/docs/devcontainers/containers) is the recommended development environment for Substation. The project includes a [development container](.devcontainer/Dockerfile) that should be used to develop and test the system. Refer to the [development guide](CONTRIBUTING.md) for more information. If you don't use VS Code, then you should run the development container from the command line: ```sh git clone https://github.com/brexhq/substation.git && cd substation && \ docker build -t substation-dev .devcontainer/ && \ docker run -v $(pwd):/workspaces/substation/ -w /workspaces/substation -v /var/run/docker.sock:/var/run/docker.sock -it substation-dev ``` ### Deployment The [Terraform documentation](build/terraform/aws/) includes guidance for deploying Substation to AWS. ## Licensing Substation and its associated code is released under the terms of the [MIT License](LICENSE). 
[releases]:https://github.com/brexhq/substation/releases "Substation Releases" [docs]:https://substation.readme.io/docs "Substation Documentation" [adopters]:https://github.com/brexhq/substation/blob/main/ADOPTERS.md "Substation Adopters" [announcement]:https://medium.com/brexeng/announcing-substation-188d049d979b "Substation Announcement Post" [v1_release]:https://medium.com/brexeng/releasing-substation-v1-0-4d0314cbc45b "Substation v1.0 Release Post" ================================================ FILE: SECURITY.md ================================================ # Responsible Disclosure To report security issues in Substation, please follow [Brex's Responsible Disclosure process](https://www.brex.com/security/responsible-disclosure/). ================================================ FILE: VERSIONING.md ================================================ # Versioning Substation uses [Semantic Versioning 2.0](https://semver.org/). Versions are managed using Git tags and are updated by the maintainers when releases are made. The version applies to the [Go module](https://pkg.go.dev/github.com/brexhq/substation) and the components below: - cmd/aws/* - condition/* - config/* - message/* - transform/* - substation.go - substation.libsonnet - go.mod Some features may be labeled as "experimental" in the documentation. These features are not subject to the same versioning guarantees as the rest of the project and may be changed or removed at any time. ## Go Versioning Substation follows the [Go Release Policy](https://golang.org/doc/devel/release.html#policy). This means that the project will maintain compatibility with the latest two major versions of Go. For example, if the latest version of Go is 1.21, Substation will support Go 1.20 and 1.21. When Go 1.22 is released, Substation will drop support for Go 1.20 and support Go 1.21 and 1.22. ## Dependency Versioning Dependencies that are directly accessible via exported packages will cause version updates in Substation. 
For example, if an exported package dependency is patched, then Substation will also be patched. This also applies to minor and major updates. These dependencies can be identified in the `go.mod` file by the comment `// Upgrades require SemVer bump.`. ## Version Support The maintainers will actively support the latest release of Substation with features, bug fixes, and security patches. Older versions will only receive security patches. If you are using an old version of Substation, we recommend upgrading to the latest version. ================================================ FILE: go.mod ================================================ module github.com/brexhq/substation/v2 go 1.23.0 require ( github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go-v2 v1.37.1 github.com/aws/aws-sdk-go-v2/config v1.28.6 github.com/aws/aws-sdk-go-v2/credentials v1.17.47 github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.15.21 github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.56 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.38.0 github.com/aws/aws-sdk-go-v2/service/eventbridge v1.36.0 github.com/aws/aws-sdk-go-v2/service/firehose v1.35.2 github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.7 github.com/aws/aws-sdk-go-v2/service/lambda v1.69.1 github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7 github.com/aws/aws-sdk-go-v2/service/sns v1.33.7 github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2 github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 github.com/aws/aws-xray-sdk-go/v2 v2.0.0 github.com/aws/smithy-go v1.22.5 github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20241004223953-c2774b1ab29b github.com/golang/protobuf v1.5.4 github.com/google/go-jsonnet v0.20.0 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 github.com/iancoleman/strcase v0.3.0 
github.com/itchyny/gojq v0.12.17 github.com/klauspost/compress v1.17.11 github.com/oschwald/maxminddb-golang v1.13.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 github.com/tidwall/gjson v1.18.0 // Upgrades require SemVer bump. github.com/tidwall/sjson v1.2.5 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/net v0.41.0 golang.org/x/sync v0.16.0 ) require ( cloud.google.com/go/storage v1.54.0 github.com/GoogleCloudPlatform/functions-framework-go v1.9.2 github.com/cloudevents/sdk-go/v2 v2.15.2 ) require ( cel.dev/expr v0.20.0 // indirect cloud.google.com/go v0.121.0 // indirect cloud.google.com/go/auth v0.16.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect cloud.google.com/go/functions v1.19.3 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/monitoring v1.24.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.24.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6 // indirect 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-jose/go-jose/v4 v4.1.2 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.10 // indirect github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect github.com/parquet-go/parquet-go v0.25.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasthttp v1.58.0 // indirect github.com/zeebo/errs v1.4.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect go.opentelemetry.io/otel v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/otel/sdk v1.35.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.10.0 // indirect golang.org/x/crypto v0.40.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sys v0.34.0 // indirect golang.org/x/text v0.27.0 // indirect golang.org/x/time v0.11.0 // indirect google.golang.org/api v0.232.0 // indirect google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/grpc v1.72.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) ================================================ FILE: go.sum ================================================ cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.121.0 h1:pgfwva8nGw7vivjZiRfrmglGWiCJBP+0OmDpenG/Fwg= cloud.google.com/go v0.121.0/go.mod h1:rS7Kytwheu/y9buoDmu5EIpMMCI4Mb8ND4aeN4Vwj7Q= cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= 
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/functions v1.19.3 h1:V0vCHSgFTUqKn57+PUXp1UfQY0/aMkveAw7wXeM3Lq0= cloud.google.com/go/functions v1.19.3/go.mod h1:nOZ34tGWMmwfiSJjoH/16+Ko5106x+1Iji29wzrBeOo= cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= cloud.google.com/go/storage v1.54.0 h1:Du3XEyliAiftfyW0bwfdppm2MMLdpVAfiIg4T2nAI+0= cloud.google.com/go/storage v1.54.0/go.mod h1:hIi9Boe8cHxTyaeqh7KMMwKg088VblFK46C2x/BWaZE= cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= github.com/DATA-DOG/go-sqlmock v1.5.1 h1:FK6RCIUSfmbnI/imIICmboyQBkOckutaa6R5YYlLZyo= github.com/DATA-DOG/go-sqlmock v1.5.1/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/GoogleCloudPlatform/functions-framework-go v1.9.2 h1:Cev/PdoxY86bJjGwHJcpiWMhrZMVEoKp9wuEp9gCUvw= github.com/GoogleCloudPlatform/functions-framework-go v1.9.2/go.mod h1:wLEV4uSJztSBI+QyUy2fkHBuGFjRIAEDOqcEQ2hwmgE= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.37.1 h1:SMUxeNz3Z6nqGsXv0JuJXc8w5YMtrQMuIBmDx//bBDY= github.com/aws/aws-sdk-go-v2 v1.37.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg= github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod 
h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.15.21 h1:FdDxp4HNtJWPBAOdkJ+84Dfx2TOA7Dq+cH72GDHhjnA= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.15.21/go.mod h1:doHEXGiMWQBxcTJy3YN1Ao2HCgCuMWumuvTULGndCuQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.56 h1:LBLyOZPVFt53RvSOvzAfEs1lagLhNQQUO0q2gKpaNcQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.56/go.mod h1:Ul6ESIrlilRfsKcbXX+OKR5YNByw8UOutPrhlFKEOFA= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43 h1:iLdpkYZ4cXIQMO7ud+cqMWR1xK5ESbt1rvN77tRi1BY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43/go.mod h1:OgbsKPAswXDd5kxnR4vZov69p3oYjbvUyIRBAAV0y9o= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 h1:ksZXBYv80EFTcgc8OJO48aQ8XDWXIQL7gGasPeCoTzI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1/go.mod h1:HSksQyyJETVZS7uM54cir0IgxttTD+8aEoJMPGepHBI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 h1:+dn/xF/05utS7tUhjIcndbuaPjfll2LhbH1cCDGLYUQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1/go.mod h1:hyAGz30LHdm5KBZDI58MXx5lDVZ5CUfvfTZvMu4HCZo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 h1:4HbnOGE9491a9zYJ9VpPh1ApgEq6ZlD4Kuv1PJenFpc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1/go.mod h1:Z6QnHC6TmpJWUxAy8FI4JzA7rTwl6EIANkyK9OR5z5w= 
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2 h1:eMh+iBTF1CbpHMfiRvIaVm+rzrH1DOzuSFaR55O+bBo= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.42.2/go.mod h1:/A4zNqF1+RS5RV+NNLKIzUX1KtK5SoWgf/OpiqrwmBo= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.38.0 h1:isKhHsjpQR3CypQJ4G1g8QWx7zNpiC/xKw1zjgJYVno= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.38.0/go.mod h1:xDvUyIkwBwNtVZJdHEwAuhFly3mezwdEWkbJ5oNYwIw= github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.24.9 h1:yhB2XYpHeWeAv5u3w9PFiSVIariSyhK5jcyQUFJpnIQ= github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.24.9/go.mod h1:Hcjb2SiUo9v1GhpXjRNW7hAwfzAPfrsgnlKpP5UYEPY= github.com/aws/aws-sdk-go-v2/service/eventbridge v1.36.0 h1:UBCwgevYbPDbPb8LKyCmyBJ0Lk/gCPq4v85rZLe3vr4= github.com/aws/aws-sdk-go-v2/service/eventbridge v1.36.0/go.mod h1:ve9wzd6ToYjkZrF0nesNJxy14kU77QjrH5Rixrr4NJY= github.com/aws/aws-sdk-go-v2/service/firehose v1.35.2 h1:A4rkZ/YpyzoU8f8LMe1rPXEvkzX5R/vdAxDwN6IGegs= github.com/aws/aws-sdk-go-v2/service/firehose v1.35.2/go.mod h1:3Iza1sNaP9L+uKzhE08ilDSz8Dbu2tOL8e5exyj0etE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 h1:ps3nrmBWdWwakZBydGX1CxeYFK80HsQ79JLMwm7Y4/c= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1/go.mod h1:bAdfrfxENre68Hh2swNaGEVuFYE74o0SaSCAlaG9E74= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6 h1:nbmKXZzXPJn41CcD4HsHsGWqvKjLKz9kWu6XxvLmf1s= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6/go.mod h1:SJhcisfKfAawsdNQoZMBEjg+vyN2lH6rO6fP+T94z5Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 h1:ky79ysLMxhwk5rxJtS+ILd3Mc8kC5fhsLBrP27r6h4I= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1/go.mod 
h1:+2MmkvFvPYM1vsozBWduoLJUi5maxFk5B7KJFECujhY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 h1:MdVYlN5pcQu1t1OYx4Ajo3fKl1IEhzgdPQbYFCRjYS8= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1/go.mod h1:iikmNLrvHm2p4a3/4BPeix2S9P+nW8yM1IZW73x8bFA= github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.7 h1:QTtbqxI+i2gaWjcTwJZtm8/xEl9kiQXXbOatGabNuXA= github.com/aws/aws-sdk-go-v2/service/kinesis v1.32.7/go.mod h1:5aKZaOb2yfdeAOvfam0/6HoUXg01pN172bn7MqpM35c= github.com/aws/aws-sdk-go-v2/service/lambda v1.69.1 h1:q1NrvoJiz0rm9ayKOJ9wsMGmStK6rZSY36BDICMrcuY= github.com/aws/aws-sdk-go-v2/service/lambda v1.69.1/go.mod h1:hDj7He9kbR9T5zugnS+T21l4z6do4SEGuno/BpJLpA0= github.com/aws/aws-sdk-go-v2/service/route53 v1.6.2 h1:OsggywXCk9iFKdu2Aopg3e1oJITIuyW36hA/B0rqupE= github.com/aws/aws-sdk-go-v2/service/route53 v1.6.2/go.mod h1:ZnAMilx42P7DgIrdjlWCkNIGSBLzeyk6T31uB8oGTwY= github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 h1:Hsqo8+dFxSdDvv9B2PgIx1AJAnDpqgS0znVI+R+MoGY= github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1/go.mod h1:8Q0TAPXD68Z8YqlcIGHs/UNIDHsxErV9H4dl4vJEpgw= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7 h1:Nyfbgei75bohfmZNxgN27i528dGYVzqWJGlAO6lzXy8= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7/go.mod h1:FG4p/DciRxPgjA+BEOlwRHN0iA8hX2h9g5buSy3cTDA= github.com/aws/aws-sdk-go-v2/service/sns v1.33.7 h1:N3o8mXK6/MP24BtD9sb51omEO9J9cgPM3Ughc293dZc= github.com/aws/aws-sdk-go-v2/service/sns v1.33.7/go.mod h1:AAHZydTB8/V2zn3WNwjLXBK1RAcSEpDNmFfrmjvrJQg= github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2 h1:mFLfxLZB/TVQwNJAYox4WaxpIu+dFVIcExrmRmRCOhw= github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2/go.mod h1:GnvfTdlvcpD+or3oslHPOn4Mu6KaCwlCp+0p0oqWnrM= github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod 
h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= github.com/aws/aws-xray-sdk-go/v2 v2.0.0 h1:/AkLb6rmRWjz8pQTm6BxCGcjebS+W1yFoH9rxy3ekM8= github.com/aws/aws-xray-sdk-go/v2 v2.0.0/go.mod h1:yyjiofE/pQ9u682QgBw3tkyuyvcN+6piDiQnhwWMyng= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20241004223953-c2774b1ab29b h1:kbD/R7CFXWfsTbiL+dlBMNhUi5z/KeSMan9oFSmtbxQ= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20241004223953-c2774b1ab29b/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g= github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/grpc-ecosystem/go-grpc-middleware/v2 
v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg= github.com/itchyny/gojq v0.12.17/go.mod h1:WBrEMkgAfAGO1LUcGOckBl5O726KPp+OlkKug0I/FEY= github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE= github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8= github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo= github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE= github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod 
h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.232.0 h1:qGnmaIMf7KcuwHOlF3mERVzChloDYwRfOJOrHt8YC3I= google.golang.org/api v0.232.0/go.mod h1:p9QCfBWZk1IJETUdbTKloR5ToFdKbYh2fkjsUL6vNoY= google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= 
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 h1:vPV0tzlsK6EzEDHNNH5sa7Hs9bd7iXR7B1tSiPepkV0= google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:pKLAc5OolXC3ViWGI62vvC0n10CpwAtRcTNCFwTKBEw= google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2 h1:IqsN8hx+lWLqlN+Sc3DoMy/watjofWiU8sRFgQ8fhKM= google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
================================================
FILE: substation.go
================================================
package substation

import (
	"context"
	_ "embed"
	"encoding/json"
	"fmt"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/message"
	"github.com/brexhq/substation/v2/transform"
)

//go:embed substation.libsonnet
var Library string

var errNoTransforms = fmt.Errorf("no transforms configured")

// Config is the core configuration for the application. Custom applications
// should embed this and add additional configuration options.
type Config struct {
	// Transforms contains a list of data transformations that are executed.
	Transforms []config.Config `json:"transforms"`
}

// Substation provides access to data transformation functions.
type Substation struct {
	cfg Config

	// factory builds a Transformer from each config entry; replaceable
	// via WithTransformFactory for custom transforms.
	factory transform.Factory
	tforms  []transform.Transformer
}

// New returns a new Substation instance.
func New(ctx context.Context, cfg Config, opts ...func(*Substation)) (*Substation, error) {
	if cfg.Transforms == nil {
		return nil, errNoTransforms
	}

	sub := &Substation{
		cfg:     cfg,
		factory: transform.New,
	}

	// Options are applied before transforms are built so a custom
	// factory (if any) is used for every configured transform.
	for _, o := range opts {
		o(sub)
	}

	// Create transforms from the configuration.
	for _, c := range cfg.Transforms {
		t, err := sub.factory(ctx, c)
		if err != nil {
			return nil, err
		}

		sub.tforms = append(sub.tforms, t)
	}

	return sub, nil
}

// WithTransformFactory implements a custom transform factory.
func WithTransformFactory(fac transform.Factory) func(*Substation) {
	return func(s *Substation) {
		s.factory = fac
	}
}

// Transform runs the configured data transformation functions on the
// provided messages.
//
// This is safe to use concurrently.
func (s *Substation) Transform(ctx context.Context, msg ...*message.Message) ([]*message.Message, error) {
	return transform.Apply(ctx, s.tforms, msg...)
}

// String returns a JSON representation of the configuration.
func (s *Substation) String() string {
	b, err := json.Marshal(s.cfg)
	if err != nil {
		return fmt.Sprintf("substation: %v", err)
	}

	return string(b)
}
================================================
FILE: substation.libsonnet
================================================
local helpers = {
  // If the input is not an array, then this returns it as an array.
  make_array(i): if !std.isArray(i) then [i] else i,
  // Expands the 'obj' abbreviation to 'object' and normalizes the
  // abbreviated keys inside it; nulls in the patch delete the
  // abbreviated keys via std.mergePatch's deletion semantics.
  abbv(settings): std.mergePatch(settings, {
    object: if std.objectHas(settings, 'object') then $.abbv_obj(settings.object)
    else if std.objectHas(settings, 'obj') then $.abbv_obj(settings.obj)
    else null,
    obj: null,
  }),
  // Maps abbreviated keys to their canonical names ('src' -> 'source_key',
  // 'trg' -> 'target_key', 'btch' -> 'batch_key').
  abbv_obj(s): {
    source_key: if std.objectHas(s, 'src') then s.src
    else if std.objectHas(s, 'source_key') then s.source_key
    else null,
    src: null,
    target_key: if std.objectHas(s, 'trg') then s.trg
    else if std.objectHas(s, 'target_key') then s.target_key
    else null,
    trg: null,
    // Fixed: previously read 's.batch' (a key that never exists) after
    // checking for 'btch', which raised a field-access error whenever the
    // 'btch' abbreviation was used.
    batch_key: if std.objectHas(s, 'btch') then s.btch
    else if std.objectHas(s, 'batch_key') then s.batch_key
    else null,
    // Delete the abbreviation, consistent with 'src' and 'trg' above.
    btch: null,
  },
  // Deterministic short ID derived from the component type and settings.
  id(type, settings): std.join('-', [std.md5(type)[:8], std.md5(std.toString(settings))[:8]]),
};

{
  // Mirrors interfaces from the condition package.
cnd: $.condition,
condition: {
  // Convenience wrappers: each accepts a single condition or an array of
  // conditions and delegates to the corresponding meta inspector.
  all(i): $.condition.meta.all({ conditions: helpers.make_array(i) }),
  any(i): $.condition.meta.any({ conditions: helpers.make_array(i) }),
  none(i): $.condition.meta.none({ conditions: helpers.make_array(i) }),
  meta: {
    all(settings={}): {
      local default = {
        object: $.config.object,
        conditions: [],
      },

      type: 'meta_all',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    any(settings={}): {
      local default = {
        object: $.config.object,
        conditions: [],
      },

      type: 'meta_any',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    none(settings={}): {
      local default = {
        object: $.config.object,
        conditions: [],
      },

      type: 'meta_none',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  fmt: $.condition.format,
  format: {
    // format_json takes no settings; the parameter is accepted for
    // signature consistency with the other inspectors.
    json(settings={}): {
      type: 'format_json',
    },
    mime(settings={}): {
      local default = {
        object: $.config.object,
        type: null,
      },

      type: 'format_mime',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  num: $.condition.number,
  number: {
    // Shared defaults for the number comparison inspectors below.
    default: {
      object: $.config.object,
      value: null,
    },
    eq(settings={}): $.condition.number.equal_to(settings=settings),
    equal_to(settings={}): {
      local default = $.condition.number.default,

      type: 'number_equal_to',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    lt(settings={}): $.condition.number.less_than(settings=settings),
    less_than(settings={}): {
      local default = $.condition.number.default,

      type: 'number_less_than',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    gt(settings={}): $.condition.number.greater_than(settings=settings),
    greater_than(settings={}): {
      local default = $.condition.number.default,

      type: 'number_greater_than',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    bitwise: {
      and(settings={}): {
        local default = {
          object: $.config.object,
          value: null,
        },

        type: 'number_bitwise_and',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      // bitwise NOT is unary, so its default has no 'value' field.
      not(settings={}): {
        local default = {
          object: $.config.object,
        },

        type: 'number_bitwise_not',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      or(settings={}): {
        local default = {
          object: $.config.object,
          value: null,
        },

        type: 'number_bitwise_or',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      xor(settings={}): {
        local default = {
          object: $.config.object,
          value: null,
        },

        type: 'number_bitwise_xor',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    len: $.condition.number.length,
    length: {
      default: {
        object: $.config.object,
        value: null,
        measurement: 'byte',
      },
      eq(settings={}): $.condition.number.length.equal_to(settings=settings),
      equal_to(settings={}): {
        local default = $.condition.number.length.default,

        type: 'number_length_equal_to',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      gt(settings={}): $.condition.number.length.greater_than(settings=settings),
      greater_than(settings={}): {
        local default = $.condition.number.length.default,

        type: 'number_length_greater_than',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      lt(settings={}): $.condition.number.length.less_than(settings=settings),
      less_than(settings={}): {
        local default = $.condition.number.length.default,

        type: 'number_length_less_than',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  net: $.condition.network,
  network: {
    ip: {
      default: {
        object: $.config.object,
      },
      global_unicast(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_global_unicast',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      link_local_multicast(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_link_local_multicast',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      link_local_unicast(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_link_local_unicast',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      loopback(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_loopback',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      multicast(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_multicast',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      private(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_private',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      unicast(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_unicast',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      unspecified(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_unspecified',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      valid(settings={}): {
        local default = $.condition.network.ip.default,

        type: 'network_ip_valid',
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  str: $.condition.string,
  string: {
    default: {
      object: $.config.object,
      value: null,
    },
    has(settings={}): $.condition.string.contains(settings=settings),
    contains(settings={}): {
      local default = $.condition.string.default,

      type: 'string_contains',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    eq(settings={}): $.condition.string.equal_to(settings=settings),
    equal_to(settings={}): {
      local default = $.condition.string.default,

      type: 'string_equal_to',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    gt(settings={}): $.condition.string.greater_than(settings=settings),
    greater_than(settings={}): {
      local default = $.condition.string.default,

      type: 'string_greater_than',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    lt(settings={}): $.condition.string.less_than(settings=settings),
    less_than(settings={}): {
      local default = $.condition.string.default,

      type: 'string_less_than',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    prefix(settings={}): $.condition.string.starts_with(settings=settings),
    starts_with(settings={}): {
      local default = $.condition.string.default,

      type: 'string_starts_with',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    suffix(settings={}): $.condition.string.ends_with(settings=settings),
    ends_with(settings={}): {
      local default = $.condition.string.default,

      type: 'string_ends_with',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    match(settings={}): {
      local default = {
        object: $.config.object,
        pattern: null,
      },

      type: 'string_match',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  util: $.condition.utility,
  utility: {
    random(settings={}): {
      type: 'utility_random',
    },
  },
},

// Mirrors interfaces from the transform package.
tf: $.transform,
transform: {
  agg: $.transform.aggregate,
  aggregate: {
    from: {
      arr(settings={}): $.transform.aggregate.from.array(settings=settings),
      array(settings={}): {
        local type = 'aggregate_from_array',
        local default = { id: helpers.id(type, settings), object: $.config.object },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      str(settings={}): $.transform.aggregate.from.string(settings=settings),
      string(settings={}): {
        local type = 'aggregate_from_string',
        local default = { id: helpers.id(type, settings), separator: null },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    to: {
      arr(settings={}): $.transform.aggregate.to.array(settings=settings),
      array(settings={}): {
        local type = 'aggregate_to_array',
        local default = { id: helpers.id(type, settings), object: $.config.object, batch: $.config.batch },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      str(settings={}): $.transform.aggregate.to.string(settings=settings),
      string(settings={}): {
        local type = 'aggregate_to_string',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, separator: null },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  arr: $.transform.array,
  array: {
    join(settings={}): {
      local type = 'array_join',
      local default = { id: helpers.id(type, settings), object: $.config.object, separator: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    zip(settings={}): {
      local type = 'array_zip',
      local default = { id: helpers.id(type, settings), object: $.config.object },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  enrich: {
    aws: {
      dynamodb: {
        query(settings={}): {
          local type = 'enrich_aws_dynamodb_query',
          local default = {
            id: helpers.id(type, settings),
            object: $.config.object,
            aws: $.config.aws,
            attributes: { partition_key: null, sort_key: null },
            limit: 1,
            scan_index_forward: false,
          },

          type: type,
          settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
        },
      },
      lambda(settings={}): {
        local type = 'enrich_aws_lambda',
        local default = { id: helpers.id(type, settings), object: $.config.object, aws: $.config.aws },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    dns: {
      default: { object: $.config.object, request: $.config.request },
      domain_lookup(settings={}): {
        local type = 'enrich_dns_domain_lookup',
        local default = $.transform.enrich.dns.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      ip_lookup(settings={}): {
        local type = 'enrich_dns_ip_lookup',
        local default = $.transform.enrich.dns.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      txt_lookup(settings={}): {
        local type = 'enrich_dns_txt_lookup',
        local default = $.transform.enrich.dns.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    http: {
      default: { object: $.config.object, request: $.config.request, url: null, headers: null },
      get(settings={}): {
        local type = 'enrich_http_get',
        local default = $.transform.enrich.http.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      post(settings={}): {
        local type = 'enrich_http_post',
        local default = $.transform.enrich.http.default { body_key: null, id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    kv_store: {
      default: { object: $.config.object, prefix: null, kv_store: null, close_kv_store: false },
      iget: $.transform.enrich.kv_store.item.get,
      iset: $.transform.enrich.kv_store.item.set,
      item: {
        get(settings={}): {
          local type = 'enrich_kv_store_get',
          local default = $.transform.enrich.kv_store.default { id: helpers.id(type, settings) },

          type: type,
          settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
        },
        set(settings={}): {
          local type = 'enrich_kv_store_set',
          local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: helpers.id(type, settings) },

          type: type,
          settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
        },
      },
      sadd: $.transform.enrich.kv_store.set.add,
      set: {
        add(settings={}): {
          local type = 'enrich_kv_store_set_add',
          local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: helpers.id(type, settings) },

          type: type,
          settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
        },
      },
    },
  },
  fmt: $.transform.format,
  format: {
    default: { object: $.config.object },
    from: {
      b64(settings={}): $.transform.format.from.base64(settings=settings),
      base64(settings={}): {
        local type = 'format_from_base64',
        local default = $.transform.format.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      gz(settings={}): $.transform.format.from.gzip(settings=settings),
      gzip(settings={}): {
        local type = 'format_from_gzip',
        local default = { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      parquet(settings={}): {
        local type = 'format_from_parquet',
        local default = { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      pretty_print(settings={}): {
        local type = 'format_from_pretty_print',
        local default = { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      zip(settings={}): {
        local type = 'format_from_zip',
        local default = { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    to: {
      b64(settings={}): $.transform.format.to.base64(settings=settings),
      base64(settings={}): {
        local type = 'format_to_base64',
        local default = $.transform.format.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      gz(settings={}): $.transform.format.to.gzip(settings=settings),
      gzip(settings={}): {
        local type = 'format_to_gzip',
        local default = { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  hash: {
    default: { object: $.config.object },
    md5(settings={}): {
      local type = 'hash_md5',
      local default = $.transform.hash.default { id: helpers.id(type, settings) },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    sha256(settings={}): {
      local type = 'hash_sha256',
      local default = $.transform.hash.default { id: helpers.id(type, settings) },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  num: $.transform.number,
  number: {
    max(settings={}): $.transform.number.maximum(settings=settings),
    maximum(settings={}): {
      local type = 'number_maximum',
      local default = { id: helpers.id(type, settings), object: $.config.object, value: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    min(settings={}): $.transform.number.minimum(settings=settings),
    minimum(settings={}): {
      local type = 'number_minimum',
      local default = { id: helpers.id(type, settings), object: $.config.object, value: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    math: {
      default: { object: $.config.object },
      add(settings={}): $.transform.number.math.addition(settings=settings),
      addition(settings={}): {
        local type = 'number_math_addition',
        local default = $.transform.number.math.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      sub(settings={}): $.transform.number.math.subtraction(settings=settings),
      subtraction(settings={}): {
        local type = 'number_math_subtraction',
        local default = $.transform.number.math.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      mul(settings={}): $.transform.number.math.multiplication(settings=settings),
      multiplication(settings={}): {
        local type = 'number_math_multiplication',
        local default = $.transform.number.math.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      div(settings={}): $.transform.number.math.division(settings=settings),
      division(settings={}): {
        local type = 'number_math_division',
        local default = $.transform.number.math.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  meta: {
    err(settings={}): {
      local type = 'meta_err',
      local default = { id: helpers.id(type, settings), transforms: null, error_messages: ['.*'] },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    for_each(settings={}): {
      local type = 'meta_for_each',
      local default = { id: helpers.id(type, settings), object: $.config.object, transforms: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    kv_store: {
      lock(settings={}): {
        local type = 'meta_kv_store_lock',
        local default = {
          id: helpers.id(type, settings),
          object: $.config.object { ttl_key: null },
          transforms: null,
          kv_store: null,
          prefix: null,
          ttl_offset: '0s',
        },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    metric: {
      duration(settings={}): {
        local type = 'meta_metric_duration',
        local default = { id: helpers.id(type, settings), metric: $.config.metric, transforms: null },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    retry(settings={}): {
      local type = 'meta_retry',
      local default = {
        id: helpers.id(type, settings),
        retry: $.config.retry,
        transforms: null,
        condition: null,
        error_messages: ['.*'],
      },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    switch(settings={}): {
      local type = 'meta_switch',
      local default = { id: helpers.id(type, settings), cases: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  net: $.transform.network,
  network: {
    domain: {
      default: { object: $.config.object },
      registered_domain(settings={}): {
        local type = 'network_domain_registered_domain',
        local default = $.transform.network.domain.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      subdomain(settings={}): {
        local type = 'network_domain_subdomain',
        local default = $.transform.network.domain.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      tld(settings={}): $.transform.network.domain.top_level_domain(settings=settings),
      top_level_domain(settings={}): {
        local type = 'network_domain_top_level_domain',
        local default = $.transform.network.domain.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  obj: $.transform.object,
  object: {
    default: { object: $.config.object },
    cp(settings={}): $.transform.object.copy(settings=settings),
    copy(settings={}): {
      local type = 'object_copy',
      local default = $.transform.object.default { id: helpers.id(type, settings) },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    del(settings={}): $.transform.object.delete(settings=settings),
    delete(settings={}): {
      local type = 'object_delete',
      local default = $.transform.object.default { id: helpers.id(type, settings) },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    insert(settings={}): {
      local type = 'object_insert',
      local default = $.transform.object.default { id: helpers.id(type, settings) },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    jq(settings={}): {
      local type = 'object_jq',
      local default = { id: helpers.id(type, settings), filter: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    to: {
      bool(settings={}): $.transform.object.to.boolean(settings=settings),
      boolean(settings={}): {
        local type = 'object_to_boolean',
        local default = $.transform.object.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      float(settings={}): {
        local type = 'object_to_float',
        local default = $.transform.object.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      int(settings={}): $.transform.object.to.integer(settings=settings),
      integer(settings={}): {
        local type = 'object_to_integer',
        local default = $.transform.object.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      str(settings={}): $.transform.object.to.string(settings=settings),
      string(settings={}): {
        local type = 'object_to_string',
        local default = $.transform.object.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      uint(settings={}): $.transform.object.to.unsigned_integer(settings=settings),
      unsigned_integer(settings={}): {
        local type = 'object_to_unsigned_integer',
        local default = $.transform.object.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
  },
  send: {
    // Every send transform accepts `aux_tforms` as an abbreviation for
    // `auxiliary_transforms`; the abbreviated key is folded into the
    // canonical key (and removed) before the settings are merged.
    aws: {
      dynamodb: {
        put(settings={}): {
          local type = 'send_aws_dynamodb_put',
          local default = { id: helpers.id(type, settings), batch: $.config.batch, aws: $.config.aws, auxiliary_transforms: null },

          local s = std.mergePatch(settings, {
            auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
            aux_tforms: null,
          }),

          type: type,
          settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
        },
      },
      firehose(settings={}): $.transform.send.aws.data_firehose(settings=settings),
      data_firehose(settings={}): {
        local type = 'send_aws_data_firehose',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, aws: $.config.aws, auxiliary_transforms: null },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
      eventbridge(settings={}): {
        local type = 'send_aws_eventbridge',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, aws: $.config.aws, auxiliary_transforms: null, description: null },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
      kinesis_data_stream(settings={}): {
        local type = 'send_aws_kinesis_data_stream',
        local default = {
          id: helpers.id(type, settings),
          batch: $.config.batch,
          aws: $.config.aws,
          auxiliary_transforms: null,
          use_batch_key_as_partition_key: false,
          enable_record_aggregation: false,
        },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
      // FIX: previously the only send_aws_* transform that neither pruned
      // null settings nor honored the `aux_tforms` abbreviation; now
      // consistent with its siblings.
      lambda(settings={}): {
        local type = 'send_aws_lambda',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, aws: $.config.aws, auxiliary_transforms: null },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
      s3(settings={}): {
        local type = 'send_aws_s3',
        local default = {
          id: helpers.id(type, settings),
          batch: $.config.batch,
          aws: $.config.aws,
          file_path: $.file_path,
          auxiliary_transforms: null,
          storage_class: 'STANDARD',
        },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
      sns(settings={}): {
        local type = 'send_aws_sns',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, aws: $.config.aws, auxiliary_transforms: null },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
      sqs(settings={}): {
        local type = 'send_aws_sqs',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, aws: $.config.aws, auxiliary_transforms: null },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
    },
    gcp: {
      storage(settings={}): {
        local type = 'send_gcp_storage',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, gcp: $.config.gcp, auxiliary_transforms: null },

        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
    },
    file(settings={}): {
      local type = 'send_file',
      local default = { id: helpers.id(type, settings), batch: $.config.batch, auxiliary_transforms: null, file_path: $.file_path },

      local s = std.mergePatch(settings, {
        auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
        aux_tforms: null,
      }),

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
    },
    http: {
      post(settings={}): {
        local type = 'send_http_post',
        local default = { id: helpers.id(type, settings), batch: $.config.batch, auxiliary_transforms: null, url: null, headers: null },

        // `hdr` is an additional abbreviation for `headers`.
        local s = std.mergePatch(settings, {
          auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
          aux_tforms: null,
          headers: if std.objectHas(settings, 'headers') then settings.headers else if std.objectHas(settings, 'hdr') then settings.hdr else null,
          hdr: null,
        }),

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
      },
    },
    stdout(settings={}): {
      local type = 'send_stdout',
      local default = { id: helpers.id(type, settings), batch: $.config.batch, auxiliary_transforms: null },

      local s = std.mergePatch(settings, {
        auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null,
        aux_tforms: null,
      }),

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
    },
  },
  str: $.transform.string,
  string: {
    append(settings={}): {
      local type = 'string_append',
      local default = { id: helpers.id(type, settings), object: $.config.object, suffix: null, suffix_key: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    capture(settings={}): {
      local type = 'string_capture',
      local default = { id: helpers.id(type, settings), object: $.config.object, pattern: null, count: 0 },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    repl: $.transform.string.replace,
    replace(settings={}): {
      local type = 'string_replace',
      local default = { id: helpers.id(type, settings), object: $.config.object, pattern: null, replacement: null },

      // FIX: the patch previously re-stated `pattern: settings.pattern`
      // unguarded, which raised a missing-field error whenever `pattern`
      // was omitted (it is declared optional in `default`). The field is
      // a no-op when present, so it is simply not patched; only the
      // `repl` -> `replacement` abbreviation is folded in.
      local s = std.mergePatch(settings, {
        replacement: if std.objectHas(settings, 'replacement') then settings.replacement else if std.objectHas(settings, 'repl') then settings.repl else null,
        repl: null,
      }),

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
    },
    split(settings={}): {
      local type = 'string_split',
      local default = { id: helpers.id(type, settings), object: $.config.object, separator: null },

      // `sep` is an abbreviation for `separator`.
      local s = std.mergePatch(settings, {
        separator: if std.objectHas(settings, 'separator') then settings.separator else if std.objectHas(settings, 'sep') then settings.sep else null,
        sep: null,
      }),

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(s))),
    },
    to: {
      default: { object: $.config.object },
      lower(settings={}): {
        local type = 'string_to_lower',
        local default = $.transform.string.to.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      upper(settings={}): {
        local type = 'string_to_upper',
        local default = $.transform.string.to.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      snake(settings={}): {
        local type = 'string_to_snake',
        local default = $.transform.string.to.default { id: helpers.id(type, settings) },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    uuid(settings={}): {
      local type = 'string_uuid',
      local default = { id: helpers.id(type, settings), object: $.config.object },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  test: {
    message(settings={}): {
      local type = 'test_message',
      local default = { id: helpers.id(type, settings), value: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  time: {
    from: {
      str(settings={}): $.transform.time.from.string(settings=settings),
      string(settings={}): {
        local type = 'time_from_string',
        local default = { id: helpers.id(type, settings), object: $.config.object, format: null, location: 'UTC' },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      unix(settings={}): {
        local type = 'time_from_unix',
        local default = { id: helpers.id(type, settings), object: $.config.object },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      unix_milli(settings={}): {
        local type = 'time_from_unix_milli',
        local default = { id: helpers.id(type, settings), object: $.config.object },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    now(settings={}): {
      local type = 'time_now',
      local default = { id: helpers.id(type, settings), object: $.config.object },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    to: {
      str(settings={}): $.transform.time.to.string(settings=settings),
      string(settings={}): {
        local type = 'time_to_string',
        local default = { id: helpers.id(type, settings), object: $.config.object, format: null, location: 'UTC' },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    // NOTE: `unix` and `unix_milli` here convert TO epoch values (types
    // 'time_to_unix' / 'time_to_unix_milli') and live at this level, not
    // under `to`, preserving the existing public layout.
    unix(settings={}): {
      local type = 'time_to_unix',
      local default = { id: helpers.id(type, settings), object: $.config.object },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    unix_milli(settings={}): {
      local type = 'time_to_unix_milli',
      local default = { id: helpers.id(type, settings), object: $.config.object },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  util: $.transform.utility,
  utility: {
    control(settings={}): {
      local type = 'utility_control',
      local default = { id: helpers.id(type, settings), batch: $.config.batch },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    delay(settings={}): {
      local type = 'utility_delay',
      local default = { id: helpers.id(type, settings), duration: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    drop(settings={}): {
      local type = 'utility_drop',
      local default = { id: helpers.id(type, settings) },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    err(settings={}): {
      local type = 'utility_err',
      local default = { id: helpers.id(type, settings), message: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
    metric: {
      bytes(settings={}): {
        local type = 'utility_metric_bytes',
        local default = { id: helpers.id(type, settings), metric: $.config.metric },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      count(settings={}): {
        local type = 'utility_metric_count',
        local default = { id: helpers.id(type, settings), metric: $.config.metric },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
      freshness(settings={}): {
        local type = 'utility_metric_freshness',
        local default = { id: helpers.id(type, settings), threshold: null, metric: $.config.metric, object: $.config.object },

        type: type,
        settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
      },
    },
    secret(settings={}): {
      local type = 'utility_secret',
      local default = { id: helpers.id(type, settings), secret: null },

      type: type,
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
},
// Mirrors interfaces from the internal/kv_store package.
kv_store: {
  aws: {
    dynamodb(settings={}): {
      local default = {
        aws: $.config.aws,
        attributes: { partition_key: null, sort_key: null, value: null, ttl: null },
        consistent_read: false,
      },

      type: 'aws_dynamodb',
      settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
    },
  },
  csv_file(settings={}): {
    local default = { file: null, column: null, delimiter: ',', header: null },

    type: 'csv_file',
    settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
  },
  // NOTE(review): the default argument references
  // $.defaults.kv_store.json_file.settings, but no `defaults` field is
  // visible in this portion of the file — confirm it is declared earlier.
  json_file(settings=$.defaults.kv_store.json_file.settings): {
    local default = { file: null, is_lines: false },

    type: 'json_file',
    settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
  },
  memory(settings={}): {
    local default = { capacity: 1024 },

    type: 'memory',
    settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
  },
  mmdb(settings={}): {
    local default = { file: null },

    type: 'mmdb',
    settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
  },
  text_file(settings={}): {
    local default = { file: null },

    type: 'text_file',
    settings: std.prune(std.mergePatch(default, helpers.abbv(settings))),
  },
},
// Mirrors interfaces from the internal/secrets package.
secrets: { default: { id: null, ttl: null }, aws: { secrets_manager(settings={}): { local default = { aws: $.config.aws, id: null, ttl_offset: null, }, type: 'aws_secrets_manager', settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, environment_variable(settings={}): { local default = { id: null, name: null, ttl_offset: null }, type: 'environment_variable', settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, // Mirrors structs from the internal/config package. config: { aws: { arn: null, assume_role_arn: null }, gcp: { resource: null }, batch: { count: 1000, size: 1000 * 1000, duration: '1m' }, metric: { name: null, attributes: null, destination: null }, object: { source_key: null, target_key: null, batch_key: null }, request: { timeout: '1s' }, retry: { count: 3, delay: '1s' }, }, // Mirrors config from the internal/file package. file_path: { prefix: null, time_format: '2006/01/02', uuid: true, suffix: null }, } ================================================ FILE: substation_test.go ================================================ package substation_test import ( "context" "encoding/json" "fmt" "testing" "github.com/brexhq/substation/v2" "github.com/brexhq/substation/v2/config" "github.com/brexhq/substation/v2/message" "github.com/brexhq/substation/v2/transform" ) func ExampleSubstation() { // Substation applications rely on a context for cancellation and timeouts. ctx := context.Background() // Define a configuration. For native Substation applications, this is managed by Jsonnet. // // This example copies an object's value and prints the data to stdout. conf := []byte(` { "transforms":[ {"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}}, {"type":"send_stdout"} ] } `) cfg := substation.Config{} if err := json.Unmarshal(conf, &cfg); err != nil { // Handle error. panic(err) } // Create a new Substation instance. sub, err := substation.New(ctx, cfg) if err != nil { // Handle error. 
		panic(err)
	}

	// Print the Substation configuration.
	fmt.Println(sub)

	// Substation instances process data defined as a Message. Messages can be processed
	// individually or in groups. This example processes multiple messages as a group.
	msg := []*message.Message{
		// The first message is a data message. Only data messages are transformed.
		message.New().SetData([]byte(`{"a":"b"}`)),
		// The second message is a ctrl message. ctrl messages flush the pipeline.
		message.New().AsControl(),
	}

	// Transform the group of messages. In this example, results are not used.
	if _, err := sub.Transform(ctx, msg...); err != nil {
		// Handle error.
		panic(err)
	}

	// Output:
	// {"transforms":[{"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}},{"type":"send_stdout","settings":null}]}
	// {"a":"b","c":"b"}
}

// Custom applications should embed the Substation configuration and
// add additional configuration options.
type customConfig struct {
	substation.Config

	Auth struct {
		Username string `json:"username"`
		// Please don't store passwords in configuration files, this is only an example!
		Password string `json:"password"`
	} `json:"auth"`
}

// String returns an example string representation of the custom configuration.
func (c customConfig) String() string {
	return fmt.Sprintf("%s:%s", c.Auth.Username, c.Auth.Password)
}

// Example_substationCustomConfig demonstrates embedding substation.Config in
// an application-specific configuration struct that carries extra fields.
func Example_substationCustomConfig() {
	// Substation applications rely on a context for cancellation and timeouts.
	ctx := context.Background()

	// Define and load the custom configuration. This config includes a username
	// and password for authentication.
	conf := []byte(`
	{
		"transforms":[
			{"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}},
			{"type":"send_stdout"}
		],
		"auth":{
			"username":"foo",
			"password":"bar"
		}
	}
	`)

	cfg := customConfig{}
	if err := json.Unmarshal(conf, &cfg); err != nil {
		// Handle error.
		panic(err)
	}

	// Create a new Substation instance from the embedded configuration.
	sub, err := substation.New(ctx, cfg.Config)
	if err != nil {
		// Handle error.
		panic(err)
	}

	// Print the Substation configuration.
	fmt.Println(sub)

	// Print the custom configuration.
	fmt.Println(cfg)

	// Output:
	// {"transforms":[{"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}},{"type":"send_stdout","settings":null}]}
	// foo:bar
}

// Example_substationCustomTransforms demonstrates registering a non-standard
// transform through a custom transform factory.
func Example_substationCustomTransforms() {
	// Substation applications rely on a context for cancellation and timeouts.
	ctx := context.Background()

	// Define and load the configuration. This config includes a transform that
	// is not part of the standard Substation package.
	conf := []byte(`
	{
		"transforms":[
			{"type":"utility_duplicate"},
			{"type":"send_stdout"}
		]
	}
	`)

	cfg := substation.Config{}
	if err := json.Unmarshal(conf, &cfg); err != nil {
		// Handle error.
		panic(err)
	}

	// Create a new Substation instance with a custom transform factory for loading
	// the custom transform.
	sub, err := substation.New(ctx, cfg, substation.WithTransformFactory(customFactory))
	if err != nil {
		// Handle error.
		panic(err)
	}

	msg := []*message.Message{
		message.New().SetData([]byte(`{"a":"b"}`)),
		message.New().AsControl(),
	}

	// Transform the group of messages. In this example, results are not used.
	if _, err := sub.Transform(ctx, msg...); err != nil {
		// Handle error.
		panic(err)
	}

	// Output:
	// {"a":"b"}
	// {"a":"b"}
}

// customFactory is used in the custom transform example to load the custom transform.
// Unrecognized types fall through to the standard transform.New factory.
func customFactory(ctx context.Context, cfg config.Config) (transform.Transformer, error) {
	switch cfg.Type {
	// Usually a custom transform requires configuration, but this
	// is a toy example. Customizable transforms should have a new
	// function that returns a new instance of the configured transform.
	case "utility_duplicate":
		return &utilityDuplicate{Count: 1}, nil
	}

	return transform.New(ctx, cfg)
}

// Duplicates a message.
type utilityDuplicate struct {
	// Count is the number of times to duplicate the message.
	Count int `json:"count"`
}

// Transform implements transform.Transformer. Data messages are returned
// along with Count additional entries; note that every entry is the same
// *message.Message pointer, so the "duplicates" share underlying state.
func (t *utilityDuplicate) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) {
	// Always return control messages.
	if msg.IsControl() {
		return []*message.Message{msg}, nil
	}

	output := []*message.Message{msg}
	for i := 0; i < t.Count; i++ {
		output = append(output, msg)
	}

	return output, nil
}

// FuzzTestSubstation feeds arbitrary bytes through config parsing, instance
// creation, and transformation. Any error short-circuits the iteration
// without failing — the fuzzer is hunting for panics, not errors.
func FuzzTestSubstation(f *testing.F) {
	testcases := [][]byte{
		[]byte(`{"transforms":[{"type":"utility_duplicate"}]}`),
		[]byte(`{"transforms":[{"type":"utility_duplicate", "count":2}]}`),
		[]byte(`{"transforms":[{"type":"unknown_type"}]}`),
		[]byte(`{"transforms":[{"type":"utility_duplicate", "count":"invalid"}]}`),
		[]byte(``),
	}

	for _, tc := range testcases {
		f.Add(tc)
	}

	f.Fuzz(func(t *testing.T, data []byte) {
		ctx := context.TODO()

		var cfg substation.Config
		err := json.Unmarshal(data, &cfg)
		if err != nil {
			return
		}

		sub, err := substation.New(ctx, cfg)
		if err != nil {
			return
		}

		// The fuzz input doubles as the message payload.
		msg := message.New().SetData(data)
		_, err = sub.Transform(ctx, msg)
		if err != nil {
			return
		}
	})
}

================================================
FILE: substation_test.jsonnet
================================================
// Exercises the condition helpers exported by substation.libsonnet (bound to
// the 'sub' external variable) so the Jsonnet library can be lint/eval tested.
local sub = std.extVar('sub');

local src = 'source';

{
  condition: {
    all: sub.condition.all([$.condition.string.contains, $.condition.string.match]),
    any: sub.condition.any([$.condition.string.contains, $.condition.string.match]),
    none: sub.condition.none([$.condition.string.contains, $.condition.string.match]),
    meta: {
      all: sub.condition.meta.all({ inspectors: [$.condition.string.contains, $.condition.string.match] }),
      any: sub.condition.meta.any({ inspectors: [$.condition.string.contains, $.condition.string.match] }),
      none: sub.condition.meta.none({ inspectors: [$.condition.string.contains, $.condition.string.match] }),
    },
    string: {
      contains: sub.condition.string.contains({ obj: { src: src }, value: 'z' }),
      match: sub.condition.string.match({ obj: { src: src }, pattern: 'z' }),
    },
  },
}