Repository: skippbox/kubeless Branch: master Commit: af81a12a4b55 Files: 273 Total size: 24.9 MB Directory structure: gitextract_a4vhstu0/ ├── .circleci/ │ └── config.yml ├── .github/ │ ├── PULL_REQUEST_TEMPLATE.md │ ├── gcloud.json.enc │ └── issue_template.md ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── OWNERS ├── README.md ├── cmd/ │ ├── function-controller/ │ │ └── function-controller.go │ └── kubeless/ │ ├── autoscale/ │ │ ├── autoscale.go │ │ ├── autoscaleCreate.go │ │ ├── autoscaleDelete.go │ │ ├── autoscaleList.go │ │ ├── autoscaleList_test.go │ │ └── autoscale_test.go │ ├── completion/ │ │ └── completion.go │ ├── function/ │ │ ├── call.go │ │ ├── delete.go │ │ ├── deploy.go │ │ ├── describe.go │ │ ├── function.go │ │ ├── function_test.go │ │ ├── list.go │ │ ├── list_test.go │ │ ├── logs.go │ │ ├── top.go │ │ ├── top_test.go │ │ └── update.go │ ├── getserverconfig/ │ │ └── getServerConfig.go │ ├── kubeless.go │ ├── topic/ │ │ ├── topic.go │ │ ├── topicCreate.go │ │ ├── topicDelete.go │ │ ├── topicList.go │ │ └── topicPublish.go │ ├── trigger/ │ │ ├── cronjob/ │ │ │ ├── create.go │ │ │ ├── cronjob_trigger.go │ │ │ ├── delete.go │ │ │ ├── list.go │ │ │ └── update.go │ │ ├── http/ │ │ │ ├── create.go │ │ │ ├── delete.go │ │ │ ├── http_trigger.go │ │ │ ├── list.go │ │ │ └── update.go │ │ ├── kafka/ │ │ │ ├── create.go │ │ │ ├── delete.go │ │ │ ├── kafka_trigger.go │ │ │ ├── list.go │ │ │ └── update.go │ │ ├── kinesis/ │ │ │ ├── create.go │ │ │ ├── delete.go │ │ │ ├── kinesis_trigger.go │ │ │ ├── list.go │ │ │ ├── publish.go │ │ │ ├── stream_create.go │ │ │ └── update.go │ │ ├── nats/ │ │ │ ├── create.go │ │ │ ├── delete.go │ │ │ ├── list.go │ │ │ ├── nats_trigger.go │ │ │ ├── publish.go │ │ │ └── update.go │ │ └── trigger.go │ └── version/ │ └── version.go ├── docker/ │ ├── controller-manager │ ├── dev-environment/ │ │ ├── Dockerfile │ │ └── entry-point.sh │ ├── event-sources/ │ │ └── kubernetes/ │ │ ├── Dockerfile │ │ 
├── README.md │ │ └── events.py │ ├── function-controller/ │ │ └── Dockerfile │ ├── function-image-builder/ │ │ ├── Dockerfile │ │ └── entrypoint.sh │ ├── runtime/ │ │ └── README.md │ └── unzip/ │ └── Dockerfile ├── docs/ │ ├── GKE-deployment.md │ ├── README.md │ ├── advanced-function-deployment.md │ ├── architecture.md │ ├── autoscaling.md │ ├── building-functions.md │ ├── cronjob-triggers.md │ ├── debug-functions.md │ ├── debugging.md │ ├── dev-guide.md │ ├── function-controller-configuration.md │ ├── http-triggers.md │ ├── implementing-new-runtime.md │ ├── implementing-new-trigger.md │ ├── kubeless-functions.md │ ├── kubeless-on-AKS.md │ ├── misc/ │ │ ├── kafka-pv-gke.yaml │ │ ├── kubeless-grafana-dashboard.json │ │ └── zookeeper-pv-gke.yaml │ ├── monitoring.md │ ├── proposals/ │ │ ├── decoupling-triggers-and-runtimes.md │ │ └── http-triggers.md │ ├── pubsub-functions.md │ ├── quick-start.md │ ├── release-flow.md │ ├── runtimes.md │ ├── streaming-functions.md │ ├── triggers.md │ ├── troubleshooting.md │ └── use-existing-kafka.md ├── examples/ │ ├── Makefile │ ├── README.md │ ├── ballerina/ │ │ ├── hello_with_conf/ │ │ │ ├── hello_with_conf.bal │ │ │ └── kubeless.toml │ │ ├── helloget.bal │ │ └── hellowithdata.bal │ ├── dotnetcore/ │ │ ├── dependency-yaml.cs │ │ ├── dependency-yaml.csproj │ │ ├── fibonacci.cs │ │ ├── fibonacci.csproj │ │ ├── helloget.cs │ │ ├── helloget.csproj │ │ ├── hellowithdata.cs │ │ └── hellowithdata.csproj │ ├── golang/ │ │ ├── go.mod │ │ ├── helloget.go │ │ ├── hellowithdata.go │ │ └── hellowithdeps.go │ ├── java/ │ │ ├── HelloGet.java │ │ ├── HelloWithData.java │ │ ├── HelloWithDeps.java │ │ └── pom.xml │ ├── jvm/ │ │ ├── Readme.md │ │ ├── java/ │ │ │ ├── Readme.md │ │ │ ├── build/ │ │ │ │ └── libs/ │ │ │ │ └── java-0.1-all.jar │ │ │ ├── build.gradle │ │ │ ├── src/ │ │ │ │ └── main/ │ │ │ │ └── java/ │ │ │ │ └── io/ │ │ │ │ └── ino/ │ │ │ │ └── Handler.java │ │ │ └── test-java-jvm.jar │ │ └── scala/ │ │ ├── Readme.md │ │ ├── build.sbt │ 
│ ├── project/ │ │ │ ├── assembly.sbt │ │ │ └── build.properties │ │ └── src/ │ │ └── main/ │ │ └── scala/ │ │ └── de/ │ │ └── inoio/ │ │ └── Handler.scala │ ├── nodejs/ │ │ ├── function.yaml │ │ ├── function1.yaml │ │ ├── helloFunctions.tar.bz2 │ │ ├── helloFunctions.tar.xz │ │ ├── helloget.js │ │ ├── hellostream.js │ │ ├── hellowithdata.js │ │ ├── hellowithdeps.js │ │ ├── index.js │ │ └── package.json │ ├── php/ │ │ ├── composer.json │ │ ├── helloget.php │ │ ├── hellowithdata.php │ │ └── hellowithdeps.php │ ├── python/ │ │ ├── Dockerfile │ │ ├── function.yaml │ │ ├── function1.yaml │ │ ├── helloget.py │ │ ├── hellowithdata.py │ │ ├── hellowithdeps.py │ │ ├── hellowithdepshelper.py │ │ └── requirements.txt │ └── ruby/ │ ├── Gemfile │ ├── function.yaml │ ├── helloget.rb │ ├── hellowithdata.rb │ ├── hellowithdeps.rb │ └── latest.rb ├── go.mod ├── go.sum ├── hack/ │ ├── boilerplate.go.txt │ └── update-codegen.sh ├── kubeless-non-rbac.jsonnet ├── kubeless-openshift.jsonnet ├── kubeless.jsonnet ├── manifests/ │ ├── README.md │ ├── autoscaling/ │ │ ├── custom-metrics.yaml │ │ ├── prometheus-operator.yaml │ │ ├── sample-metrics-app.yaml │ │ └── sample-prometheus-instance.yaml │ ├── kinesis/ │ │ └── kinesalite.yaml │ ├── monitoring/ │ │ ├── grafana-configmap.yaml │ │ ├── grafana-deployment.yaml │ │ ├── grafana-job.yaml │ │ ├── grafana-service.yaml │ │ └── prometheus.yaml │ ├── nats/ │ │ └── nats-cluster.yaml │ └── ui/ │ └── README.md ├── pkg/ │ ├── apis/ │ │ └── kubeless/ │ │ ├── register.go │ │ └── v1beta1/ │ │ ├── doc.go │ │ ├── function.go │ │ ├── register.go │ │ └── zz_generated.deepcopy.go │ ├── client/ │ │ ├── clientset/ │ │ │ └── versioned/ │ │ │ ├── clientset.go │ │ │ ├── doc.go │ │ │ ├── fake/ │ │ │ │ ├── clientset_generated.go │ │ │ │ ├── doc.go │ │ │ │ └── register.go │ │ │ ├── scheme/ │ │ │ │ ├── doc.go │ │ │ │ └── register.go │ │ │ └── typed/ │ │ │ └── kubeless/ │ │ │ └── v1beta1/ │ │ │ ├── doc.go │ │ │ ├── fake/ │ │ │ │ ├── doc.go │ │ │ │ ├── 
fake_function.go │ │ │ │ └── fake_kubeless_client.go │ │ │ ├── function.go │ │ │ ├── generated_expansion.go │ │ │ └── kubeless_client.go │ │ ├── informers/ │ │ │ └── externalversions/ │ │ │ ├── factory.go │ │ │ ├── generic.go │ │ │ ├── internalinterfaces/ │ │ │ │ └── factory_interfaces.go │ │ │ └── kubeless/ │ │ │ ├── interface.go │ │ │ └── v1beta1/ │ │ │ ├── function.go │ │ │ └── interface.go │ │ └── listers/ │ │ └── kubeless/ │ │ └── v1beta1/ │ │ ├── expansion_generated.go │ │ └── function.go │ ├── controller/ │ │ ├── function_controller.go │ │ └── function_controller_test.go │ ├── function-image-builder/ │ │ ├── image_builder.go │ │ └── layer-builder/ │ │ ├── description.go │ │ ├── description_test.go │ │ ├── layer.go │ │ ├── layer_builder.go │ │ ├── layer_test.go │ │ ├── manifest.go │ │ └── manifest_test.go │ ├── function-proxy/ │ │ ├── Gopkg.toml │ │ ├── proxy.go │ │ └── utils/ │ │ └── proxy-utils.go │ ├── functions/ │ │ └── params.go │ ├── langruntime/ │ │ ├── langruntime.go │ │ ├── langruntime_test.go │ │ └── langruntimetestutils.go │ ├── registry/ │ │ ├── registry.go │ │ └── registry_test.go │ ├── utils/ │ │ ├── configlocation.go │ │ ├── exec.go │ │ ├── exec_test.go │ │ ├── k8sutil.go │ │ ├── k8sutil_test.go │ │ ├── kubelessutil.go │ │ ├── kubelessutil_test.go │ │ └── metrics.go │ └── version/ │ └── version.go ├── script/ │ ├── .validate │ ├── binary │ ├── binary-cli │ ├── binary-controller │ ├── cluster-up-minikube.sh │ ├── create_release.sh │ ├── enable-gcloud.sh │ ├── find_digest.sh │ ├── integration-tests │ ├── libtest.bash │ ├── pull-or-build-image.sh │ ├── release_utils.sh │ ├── start-gke-env.sh │ ├── start-test-environment.sh │ ├── upload_release_notes.sh │ ├── validate-git-marks │ ├── validate-gofmt │ ├── validate-lint │ ├── validate-test │ └── validate-vet └── tests/ ├── deployment-tests.bats ├── integration-tests-cronjob.bats ├── integration-tests-http.bats ├── integration-tests-kafka.bats ├── integration-tests-kinesis.bats ├── 
integration-tests-nats.bats ├── integration-tests-prebuilt.bats └── integration-tests.bats ================================================ FILE CONTENTS ================================================ ================================================ FILE: .circleci/config.yml ================================================ version: 2 ## Definitions build_allways: &build_allways filters: tags: only: /.*/ defaults: &defaults environment: CONTROLLER_IMAGE_NAME: kubeless/function-controller BUILDER_IMAGE_NAME: kubeless/function-image-builder CGO_ENABLED: "0" TEST_DEBUG: "1" GKE_VERSION: 1.12 MINIKUBE_VERSION: v1.2.0 MANIFESTS: kubeless kubeless-non-rbac kubeless-openshift exports: &exports # It is not possible to resolve env vars in the environment section: # https://discuss.circleci.com/t/using-environment-variables-in-config-yml-not-working/14237 run: | CONTROLLER_TAG=${CIRCLE_TAG:-build-$CIRCLE_WORKFLOW_ID} echo "export CONTROLLER_TAG=${CONTROLLER_TAG}" >> $BASH_ENV echo "export CONTROLLER_IMAGE=${CONTROLLER_IMAGE_NAME}:${CONTROLLER_TAG}" >> $BASH_ENV echo "export FUNCTION_IMAGE_BUILDER=${BUILDER_IMAGE_NAME}:${CONTROLLER_TAG}" >> $BASH_ENV echo "export KUBECFG_JPATH=/home/circleci/src/github.com/kubeless/kubeless/ksonnet-lib" >> $BASH_ENV echo "export PATH=$(pwd)/bats/libexec:$GOPATH/bin:$PATH" >> $BASH_ENV echo "export GIT_SHA1=${CIRCLE_SHA1}" >> $BASH_ENV restore_workspace: &restore_workspace run: | make bootstrap sudo cp -r /tmp/go/bin/* /usr/local/bin/ cp -r /tmp/go/*yaml . 
#### End of definitions workflows: version: 2 kubeless: jobs: - build: <<: *build_allways - minikube: <<: *build_allways requires: - build - build-cross-binaries: <<: *build_allways requires: - build - minikube_build_functions: <<: *build_allways requires: - build - GKE: <<: *build_allways requires: - build - push_latest_images: filters: branches: only: master requires: - minikube - minikube_build_functions - GKE - release: filters: tags: only: /v.*/ branches: ignore: /.*/ requires: - minikube - minikube_build_functions - GKE jobs: build: <<: *defaults docker: - image: circleci/golang:1.15 steps: - checkout - restore_cache: keys: - go-mod-v4-{{ checksum "go.sum" }} - <<: *exports - run: go mod download - run: make bootstrap - run: make VERSION=${CONTROLLER_TAG} binary - run: make test - run: make validation - run: make all-yaml - run: | mkdir build-manifests IFS=' ' read -r -a manifests <<< "$MANIFESTS" for f in "${manifests[@]}"; do sed -i.bak 's/:latest/'":${CONTROLLER_TAG}"'/g' ${f}.yaml cp ${f}.yaml build-manifests/ done - persist_to_workspace: root: /go paths: - ./bin - persist_to_workspace: root: ./ paths: - ./*yaml - store_artifacts: path: /go/bin/kubeless destination: ./bin/kubeless - store_artifacts: path: ./build-manifests/ - save_cache: key: go-mod-v4-{{ checksum "go.sum" }} paths: - /go/pkg/mod minikube: <<: *defaults machine: image: ubuntu-1604:202007-01 steps: - checkout - run: sudo apt-get update -y - run: sudo apt-get install -y tar gzip bzip2 xz-utils - attach_workspace: at: /tmp/go - <<: *exports - <<: *restore_workspace - run: ./script/pull-or-build-image.sh function-controller - run: ./script/integration-tests minikube deployment - run: ./script/integration-tests minikube basic build-cross-binaries: <<: *defaults docker: - image: circleci/golang:1.15 steps: - <<: *exports - checkout - attach_workspace: at: /tmp/go - <<: *restore_workspace - run: make VERSION=${CIRCLE_TAG} binary-cross - store_artifacts: path: bundles minikube_build_functions: 
<<: *defaults machine: image: ubuntu-1604:202007-01 steps: - checkout - <<: *exports - attach_workspace: at: /tmp/go - <<: *restore_workspace - run: ./script/pull-or-build-image.sh function-controller - run: ./script/pull-or-build-image.sh function-image-builder - run: "echo '{\"insecure-registries\" : [\"0.0.0.0/0\"]}' > /tmp/daemon.json" - run: sudo mv /tmp/daemon.json /etc/docker/daemon.json - run: sudo service docker restart - run: docker info - run: docker run -d -p 5000:5000 --restart=always --name registry -v /data/docker-registry:/var/lib/registry registry:2 - run: "sed -i.bak 's/enable-build-step: \"false\"/enable-build-step: \"true\"/g' kubeless.yaml" - run: "sed -i.bak 's/function-registry-tls-verify: \"true\"/function-registry-tls-verify: \"false\"/g' kubeless.yaml" - run: ./script/integration-tests minikube deployment - run: ./script/integration-tests minikube prebuilt_functions GKE: <<: *defaults docker: - image: circleci/golang:1.15 steps: - run: | # In case of GKE we will only want to build if it is # a build of a branch in the kubeless/kubeless repository if [[ -n "$GKE_ADMIN" && -z "$CIRCLE_PULL_REQUESTS" ]]; then export SHOULD_TEST=1 fi if [[ "$SHOULD_TEST" != "1" ]]; then circleci step halt fi - checkout - <<: *exports - attach_workspace: at: /tmp/go - <<: *restore_workspace - setup_remote_docker - run: ./script/enable-gcloud.sh $(pwd) > /dev/null - run: echo "export ESCAPED_GKE_CLUSTER=$(echo ${GKE_CLUSTER}-ci-${CIRCLE_BRANCH:-$CIRCLE_TAG} | sed 's/[^a-z0-9-]//g')" >> $BASH_ENV - run: ./script/start-gke-env.sh $ESCAPED_GKE_CLUSTER $ZONE $GKE_VERSION $GKE_ADMIN > /dev/null - run: ./script/pull-or-build-image.sh function-controller - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} deployment - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} basic - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} cronjob push_latest_images: <<: *defaults docker: - 
image: circleci/golang:1.15 steps: - <<: *exports - setup_remote_docker - run: docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD" - run: | images=( $CONTROLLER_IMAGE_NAME $BUILDER_IMAGE_NAME ) for image in "${images[@]}"; do echo "Pulling ${image}:${CONTROLLER_TAG}" docker pull ${image}:${CONTROLLER_TAG} docker tag ${image}:${CONTROLLER_TAG} ${image}:latest docker push ${image}:latest done release: <<: *defaults docker: - image: circleci/golang:1.15 steps: - <<: *exports - checkout - attach_workspace: at: /tmp/go - <<: *restore_workspace - run: make VERSION=${CIRCLE_TAG} binary-cross - run: for d in bundles/kubeless_*; do zip -r9 $d.zip $d/; done - run: ./script/create_release.sh ${CIRCLE_TAG} "${MANIFESTS}" ================================================ FILE: .github/PULL_REQUEST_TEMPLATE.md ================================================ **Issue Ref**: [Issue number related to this PR or None] **Description**: [PR Description] **TODOs**: - [ ] Ready to review - [ ] Automated Tests - [ ] Docs ================================================ FILE: .github/issue_template.md ================================================ **Is this a BUG REPORT or FEATURE REQUEST?**: **What happened**: **What you expected to happen**: **How to reproduce it (as minimally and precisely as possible)**: **Anything else we need to know?**: **Environment**: - Kubernetes version (use `kubectl version`): - Kubeless version (use `kubeless version`): - Cloud provider or physical cluster: ================================================ FILE: .gitignore ================================================ ### Go ### # Binaries for programs and plugins *.exe *.dll *.so *.dylib # Test binary, build with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 .glide/ ### Linux ### *~ # temporary files which can be created if a process still has a handle open of a deleted 
file .fuse_hidden* # KDE directory preferences .directory # Linux trash folder which might appear on any partition or disk .Trash-* # .nfs files are created when an open file is removed but is still being accessed .nfs* ### OSX ### *.DS_Store .AppleDouble .LSOverride ### vscode ### .vscode/ # Icon must end with two \r Icon # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk ### Vim ### # swap [._]*.s[a-v][a-z] [._]*.sw[a-p] [._]s[a-v][a-z] [._]sw[a-p] # session Session.vim # temporary .netrwhist # auto-generated tag files tags ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. # User-specific files *.suo *.user *.userosscache *.sln.docstates # Build results [Dd]ebug/ [Dd]ebugPublic/ [Rr]elease/ [Rr]eleases/ x64/ x86/ bld/ [Bb]in/ [Oo]bj/ [Ll]og/ # JVM .classpath .project .settings # Visual Studio 2015 cache/options directory .vs/ # .NET Core project.lock.json project.fragment.lock.json artifacts/ **/Properties/launchSettings.json *_i.c *_p.c *_i.h *.ilk *.meta *.obj *.pch *.pdb *.pgc *.pgd *.rsp *.sbr *.tlb *.tli *.tlh *.tmp *.tmp_proj *.log *.vspscc *.vssscc .builds *.pidb *.svclog *.scc # NuGet Packages *.nupkg # The packages folder can be ignored because of Package Restore **/packages/* # except build/, which is used as an MSBuild target. 
!**/packages/build/ # Uncomment if necessary however generally it will be regenerated when needed #!**/packages/repositories.config # NuGet v3's project.json files produces more ignorable files *.nuget.props *.nuget.targets # IDEA Files .idea/ *.iml *.ipr *.iws # Kubeless specific bats/ bundles/ docker/function-controller/kubeless-function-controller docker/function-image-builder/imbuilder ksonnet-lib/ kubeless-openshift.yaml kubeless-non-rbac.yaml kubeless.yaml kafka-zookeeper.yaml kafka-zookeeper-openshift.yaml nats.yaml kinesis.yaml ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at kubernetes@bitnami.com. 
The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing Guidelines ## License and CLA The Kubeless license is Apache Software License V2 We do not currently ask for a Contributor License Agreement to be signed. ## Support Channels Whether you are a user or contributor, official support channels include: - GitHub [issues](https://github.com/kubeless/kubeless/issues/new) - Slack: #kubeless room in the [Kubernetes Slack](http://slack.k8s.io/) Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of. ## How to become a contributor and submit your own code ### Setup your development environment Consult the [Developer's guide](./docs/dev-guide.md) to setup yourself up. ### Contributing a patch 1. Submit an issue describing your proposed change to the repo in question. 2. The [repo owners](OWNERS) will respond to your issue promptly. 3. If your proposed change is accepted, fork the desired repo, develop and test your code changes. 4. 
Submit a pull request making sure you fill in the description clearly, point out the particular issue your PR is mitigating, and ask for code review. If the PR is related to Kafka, include at least the tag [Kafka] in the title. You will be asked to add tests (either unit or e2e tests depending on the patch) and update any affected documentation. ## Issues Issues are used as the primary method for tracking anything to do with the Kubeless project. ### Issue Type * Question: These are support or functionality inquiries that we want to have a record of for future reference. Generally these are questions that are too complex or large to store in the Slack channel or have particular interest to the community as a whole. Depending on the discussion, these can turn into "Feature" or "Bug" issues. * Proposal: Used for items (like this one) that propose new ideas or functionality that require a larger community discussion. This allows for feedback from others in the community before a feature is actually developed. This is not needed for small additions. Final word on whether or not a feature needs a proposal is up to the core maintainers. All issues that are proposals should both have a label and an issue title of "Proposal: [the rest of the title]." A proposal can become a "Feature" and does not require a milestone. * Features: These track specific feature requests and ideas until they are complete. They can evolve from a "Proposal" or can be submitted individually depending on the size. * Bugs: These track bugs with the code or problems with the documentation (i.e. missing or incomplete) ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ GO = go GO_FLAGS = GOFMT = gofmt KUBECFG = kubecfg DOCKER = docker CONTROLLER_IMAGE = kubeless-function-controller:latest FUNCTION_IMAGE_BUILDER = kubeless-function-image-builder:latest OS = linux ARCH = amd64 BUNDLES = bundles GO_PACKAGES = ./cmd/... ./pkg/... 
GO_FILES := $(shell find $(shell $(GO) list -f '{{.Dir}}' $(GO_PACKAGES)) -name \*.go) export KUBECFG_JPATH := $(CURDIR)/ksonnet-lib export PATH := $(PATH):$(CURDIR)/bats/bin .PHONY: all KUBELESS_ENVS := \ -e OS_PLATFORM_ARG \ -e OS_ARCH_ARG \ default: binary binary: CGO_ENABLED=0 ./script/binary binary-cross: ./script/binary-cli %.yaml: %.jsonnet $(KUBECFG) show -U https://raw.githubusercontent.com/kubeless/runtimes/master -o yaml $< > $@.tmp mv $@.tmp $@ all-yaml: kubeless.yaml kubeless-non-rbac.yaml kubeless-openshift.yaml kubeless.yaml: kubeless.jsonnet kubeless-non-rbac.jsonnet kubeless-non-rbac.yaml: kubeless-non-rbac.jsonnet kubeless-openshift.yaml: kubeless-openshift.jsonnet docker/function-controller: controller-build cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-function-controller $@ controller-build: ./script/binary-controller -os=$(OS) -arch=$(ARCH) function-controller: docker/function-controller $(DOCKER) build -t $(CONTROLLER_IMAGE) $< docker/function-image-builder: function-image-builder-build cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/imbuilder $@ function-image-builder-build: ./script/binary-controller -os=$(OS) -arch=$(ARCH) imbuilder github.com/kubeless/kubeless/pkg/function-image-builder function-image-builder: docker/function-image-builder $(DOCKER) build -t $(FUNCTION_IMAGE_BUILDER) $< update: ./hack/update-codegen.sh test: $(GO) test $(GO_FLAGS) $(GO_PACKAGES) validation: ./script/validate-lint ./script/validate-gofmt ./script/validate-git-marks integration-tests: ./script/integration-tests minikube deployment ./script/integration-tests minikube basic fmt: $(GOFMT) -s -w $(GO_FILES) bats: git clone --branch=v0.4.0 --depth=1 https://github.com/sstephenson/bats.git ksonnet-lib: git clone --depth=1 https://github.com/ksonnet/ksonnet-lib.git .PHONY: bootstrap bootstrap: bats ksonnet-lib GO111MODULE="off" go get -u github.com/mitchellh/gox GO111MODULE="off" go get -u golang.org/x/lint/golint @if ! 
which kubecfg >/dev/null; then \ sudo wget -q -O /usr/local/bin/kubecfg https://github.com/ksonnet/kubecfg/releases/download/v0.9.0/kubecfg-$$(go env GOOS)-$$(go env GOARCH); \ sudo chmod +x /usr/local/bin/kubecfg; \ fi @if ! which kubectl >/dev/null; then \ KUBECTL_VERSION=$$(wget -qO- https://storage.googleapis.com/kubernetes-release/release/stable.txt); \ sudo wget -q -O /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/$$KUBECTL_VERSION/bin/$$(go env GOOS)/$$(go env GOARCH)/kubectl; \ sudo chmod +x /usr/local/bin/kubectl; \ fi ================================================ FILE: OWNERS ================================================ Kubeless - A Bitnami Project Engineering manager: - ppbaena Emeritus maintainers: - ngtuna - andresmgot - anguslees - sebgoa ================================================ FILE: README.md ================================================ # Kubeless logo [![CircleCI](https://circleci.com/gh/kubeless/kubeless.svg?style=svg)](https://circleci.com/gh/kubeless/kubeless) [![Slack](https://img.shields.io/badge/slack-join%20chat%20%E2%86%92-e01563.svg)](http://slack.k8s.io) [![Not Maintained](https://img.shields.io/badge/Maintenance%20Level-Not%20Maintained-yellow.svg)](https://gist.github.com/cheerfulstoic/d107229326a01ff0f333a1d3476e068d) ## WARNING: Kubeless is no longer actively maintained by VMware. VMware has made the difficult decision to stop driving this project and therefore we will no longer actively respond to issues or pull requests. If you would like to take over maintaining this project independently from VMware, please let us know so we can add a link to your forked project here. Thank You. ## Overview `kubeless` is a Kubernetes-native serverless framework that lets you deploy small bits of code without having to worry about the underlying infrastructure plumbing. It leverages Kubernetes resources to provide auto-scaling, API routing, monitoring, troubleshooting and more. 
Other platforms are supported but full compatibility cannot be assured.
**Slack**: We're fairly active on [slack](http://slack.k8s.io) and you can find us in the #kubeless channel. ================================================ FILE: cmd/function-controller/function-controller.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Kubeless controller binary. // // See github.com/kubeless/kubeless/tree/master/pkg/controller package main import ( "fmt" "os" "os/signal" "syscall" monitoringv1alpha1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1" "github.com/kubeless/kubeless/pkg/controller" "github.com/kubeless/kubeless/pkg/utils" "github.com/kubeless/kubeless/pkg/version" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) const ( globalUsage = `` //TODO: adding explanation ) var rootCmd = &cobra.Command{ Use: "kubeless-controller", Short: "Kubeless controller", Long: globalUsage, Run: func(cmd *cobra.Command, args []string) { kubelessClient, err := utils.GetFunctionClientInCluster() if err != nil { logrus.Fatalf("Cannot get kubeless client: %v", err) } functionCfg := controller.Config{ KubeCli: utils.GetClient(), FunctionClient: kubelessClient, } restCfg, err := utils.GetInClusterConfig() if err != nil { logrus.Fatalf("Cannot get REST client: %v", err) } // ServiceMonitor client is needed for handling monitoring resources smclient, err := monitoringv1alpha1.NewForConfig(restCfg) if err != nil { logrus.Fatal(err) } functionController := 
controller.NewFunctionController(functionCfg, smclient) stopCh := make(chan struct{}) defer close(stopCh) go functionController.Run(stopCh) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, syscall.SIGTERM) signal.Notify(sigterm, syscall.SIGINT) <-sigterm }, } func main() { logrus.Infof("Running Kubeless controller manager version: %v", version.Version) if err := rootCmd.Execute(); err != nil { fmt.Println(err) os.Exit(1) } } ================================================ FILE: cmd/kubeless/autoscale/autoscale.go ================================================ /* Copyright 2016 Skippbox, Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package autoscale import ( "fmt" "strconv" "github.com/spf13/cobra" "k8s.io/api/autoscaling/v2beta1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // AutoscaleCmd contains first-class command for autoscale var AutoscaleCmd = &cobra.Command{ Use: "autoscale SUBCOMMAND", Short: "manage autoscale to function on Kubeless", Long: `autoscale command allows user to list, create, delete autoscale rule for function on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } func init() { cmds := []*cobra.Command{autoscaleCreateCmd, autoscaleListCmd, autoscaleDeleteCmd} for _, cmd := range cmds { AutoscaleCmd.AddCommand(cmd) cmd.Flags().StringP("namespace", "n", "", "Specify namespace for the autoscale") } } func getHorizontalAutoscaleDefinition(name, ns, metric string, min, max int32, value string, labels map[string]string) (v2beta1.HorizontalPodAutoscaler, error) { m := []v2beta1.MetricSpec{} switch metric { case "cpu": i, err := strconv.ParseInt(value, 10, 32) if err != nil { return v2beta1.HorizontalPodAutoscaler{}, err } i32 := int32(i) m = []v2beta1.MetricSpec{ { Type: v2beta1.ResourceMetricSourceType, Resource: &v2beta1.ResourceMetricSource{ Name: v1.ResourceCPU, TargetAverageUtilization: &i32, }, }, } case "qps": q, err := resource.ParseQuantity(value) if err != nil { return v2beta1.HorizontalPodAutoscaler{}, err } m = []v2beta1.MetricSpec{ { Type: v2beta1.ObjectMetricSourceType, Object: &v2beta1.ObjectMetricSource{ MetricName: "function_calls", TargetValue: q, Target: v2beta1.CrossVersionObjectReference{ Kind: "Service", Name: name, }, }, }, } if err != nil { return v2beta1.HorizontalPodAutoscaler{}, err } default: return v2beta1.HorizontalPodAutoscaler{}, fmt.Errorf("metric %s is not supported", metric) } return v2beta1.HorizontalPodAutoscaler{ TypeMeta: metav1.TypeMeta{ APIVersion: "autoscaling/v2beta1", Kind: "HorizontalPodAutoscaler", }, ObjectMeta: metav1.ObjectMeta{ Name: name, 
Namespace: ns, Labels: labels, }, Spec: v2beta1.HorizontalPodAutoscalerSpec{ ScaleTargetRef: v2beta1.CrossVersionObjectReference{ APIVersion: "apps/v1beta1", Kind: "Deployment", Name: name, }, MinReplicas: &min, MaxReplicas: max, Metrics: m, }, }, nil } ================================================ FILE: cmd/kubeless/autoscale/autoscaleCreate.go ================================================ package autoscale import ( "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var autoscaleCreateCmd = &cobra.Command{ Use: "create FLAG", Short: "automatically scale function based on monitored metrics", Long: `automatically scale function based on monitored metrics`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } function, err := utils.GetFunction(funcName, ns) if err != nil { logrus.Fatalf("Unable to find the function %s. 
Received %s: ", funcName, err) } min, err := cmd.Flags().GetInt32("min") if err != nil { logrus.Fatal(err) } else if min <= 0 { logrus.Fatalf("min can't be negative or zero") } max, err := cmd.Flags().GetInt32("max") if err != nil { logrus.Fatal(err) } else if max < min { logrus.Fatalf("max must be greater than or equal to min") } metric, err := cmd.Flags().GetString("metric") if err != nil { logrus.Fatal(err) } if metric != "cpu" && metric != "qps" { logrus.Fatalf("only supported metrics: cpu, qps") } value, err := cmd.Flags().GetString("value") if err != nil { logrus.Fatal(err) } hpa, err := getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, function.ObjectMeta.Labels) if err != nil { logrus.Fatal(err) } function.Spec.HorizontalPodAutoscaler = hpa kubelessClient, err := utils.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } logrus.Infof("Adding autoscaling rule to the function...") err = utils.UpdateFunctionCustomResource(kubelessClient, &function) if err != nil { logrus.Fatal(err) } logrus.Infof("Autoscaling rule for %s submitted for deployment", funcName) }, } func init() { autoscaleCreateCmd.Flags().Int32("min", 1, "minimum number of replicas") autoscaleCreateCmd.Flags().Int32("max", 1, "maximum number of replicas") autoscaleCreateCmd.Flags().String("metric", "cpu", "metric to use for calculating the autoscale. Supported metrics: cpu, qps") autoscaleCreateCmd.Flags().String("value", "", "value of the average of the metric across all replicas. If metric is cpu, value is a number represented as percentage. If metric is qps, value must be in format of Quantity") autoscaleCreateCmd.MarkFlagRequired("value") } ================================================ FILE: cmd/kubeless/autoscale/autoscaleDelete.go ================================================ /* Copyright 2016 Skippbox, Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package autoscale import ( "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/api/autoscaling/v2beta1" ) var autoscaleDeleteCmd = &cobra.Command{ Use: "delete ", Short: "delete an autoscale from Kubeless", Long: `delete an autoscale from Kubeless`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - autoscale name") } funcName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } function, err := utils.GetFunction(funcName, ns) if err != nil { logrus.Fatalf("Unable to find the function %s. Received %s: ", funcName, err) } if function.Spec.HorizontalPodAutoscaler.Name != "" { function.Spec.HorizontalPodAutoscaler = v2beta1.HorizontalPodAutoscaler{} kubelessClient, err := utils.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } logrus.Infof("Removing autoscaling rule from the function...") err = utils.UpdateFunctionCustomResource(kubelessClient, &function) if err != nil { logrus.Fatal(err) } logrus.Infof("Remove Autoscaling rule from %s successfully", funcName) } else { logrus.Fatalf("Not found an autoscale definition for %s", funcName) } }, } ================================================ FILE: cmd/kubeless/autoscale/autoscaleList.go ================================================ /* Copyright 2016 Skippbox, Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package autoscale import ( "encoding/json" "fmt" "io" "github.com/ghodss/yaml" "github.com/gosuri/uitable" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/api/autoscaling/v2beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) var autoscaleListCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all autoscales in Kubeless", Long: `list all autoscales in Kubeless`, Run: func(cmd *cobra.Command, args []string) { output, err := cmd.Flags().GetString("out") if err != nil { logrus.Fatal(err.Error()) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err.Error()) } if ns == "" { ns = utils.GetDefaultNamespace() } client := utils.GetClientOutOfCluster() if err := doAutoscaleList(cmd.OutOrStdout(), client, ns, output); err != nil { logrus.Fatal(err.Error()) } }, } func init() { autoscaleListCmd.Flags().StringP("out", "o", "", "Output format. 
One of: json|yaml") } func doAutoscaleList(w io.Writer, client kubernetes.Interface, ns, output string) error { asList, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).List(metav1.ListOptions{ LabelSelector: "created-by=kubeless", }) if err != nil { return err } return printAutoscale(w, asList.Items, output) } // printAutoscale formats the output of autoscale list func printAutoscale(w io.Writer, ass []v2beta1.HorizontalPodAutoscaler, output string) error { if output == "" { table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "TARGET", "MIN", "MAX", "METRIC", "VALUE") for _, i := range ass { n := i.Name ns := i.Namespace ta := i.Spec.ScaleTargetRef.Name min := i.Spec.MinReplicas max := i.Spec.MaxReplicas m := "" v := "" if len(i.Spec.Metrics) == 0 { logrus.Errorf("The autoscale %s has bad format. It has no metric defined.", i.Name) continue } if i.Spec.Metrics[0].Object != nil { m = i.Spec.Metrics[0].Object.MetricName v = i.Spec.Metrics[0].Object.TargetValue.String() } else if i.Spec.Metrics[0].Resource != nil { m = string(i.Spec.Metrics[0].Resource.Name) v = fmt.Sprint(*i.Spec.Metrics[0].Resource.TargetAverageUtilization) } table.AddRow(n, ns, ta, fmt.Sprint(*min), fmt.Sprint(max), m, v) } fmt.Fprintln(w, table) } else { for _, i := range ass { switch output { case "json": b, err := json.MarshalIndent(i, "", " ") if err != nil { return err } fmt.Fprintln(w, string(b)) case "yaml": b, err := yaml.Marshal(i) if err != nil { return err } fmt.Fprintln(w, string(b)) default: return fmt.Errorf("Wrong output format. 
// listAutoscaleOutput captures what doAutoscaleList writes for the given
// client/namespace/output format; any error aborts the test.
func listAutoscaleOutput(t *testing.T, client kubernetes.Interface, ns, output string) string {
	var buf bytes.Buffer
	if err := doAutoscaleList(&buf, client, ns, output); err != nil {
		t.Fatalf("doList returned error: %v", err)
	}
	return buf.String()
}

// TestAutoscaleList verifies that only HPAs labelled created-by=kubeless are
// listed in the table output, and that the json/yaml paths do not error.
func TestAutoscaleList(t *testing.T) {
	replicas := int32(1)
	targetAverageUtilization := int32(50)
	q, _ := resource.ParseQuantity("10k")
	// as1: CPU-utilization based autoscaler created by kubeless.
	as1 := av2alpha1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo",
			Namespace: "myns",
			Labels: map[string]string{
				"created-by": "kubeless",
			},
		},
		Spec: av2alpha1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: av2alpha1.CrossVersionObjectReference{
				Kind: "Deployment",
				Name: "foo",
			},
			MinReplicas: &replicas,
			MaxReplicas: replicas,
			Metrics: []av2alpha1.MetricSpec{
				{
					Type: av2alpha1.ResourceMetricSourceType,
					Resource: &av2alpha1.ResourceMetricSource{
						Name:                     v1.ResourceCPU,
						TargetAverageUtilization: &targetAverageUtilization,
					},
				},
			},
		},
	}
	// as2: custom-metric ("function_calls") autoscaler created by kubeless.
	as2 := av2alpha1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "bar",
			Namespace: "myns",
			Labels: map[string]string{
				"created-by": "kubeless",
			},
		},
		Spec: av2alpha1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: av2alpha1.CrossVersionObjectReference{
				Kind: "Deployment",
				Name: "foo",
			},
			MinReplicas: &replicas,
			MaxReplicas: replicas,
			Metrics: []av2alpha1.MetricSpec{
				{
					Type: av2alpha1.ObjectMetricSourceType,
					Object: &av2alpha1.ObjectMetricSource{
						MetricName:  "function_calls",
						TargetValue: q,
						Target: av2alpha1.CrossVersionObjectReference{
							Kind: "Service",
							Name: "foo",
						},
					},
				},
			},
		},
	}
	// as3: not created by kubeless, must be filtered out of the listing.
	as3 := av2alpha1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foobar",
			Namespace: "myns",
		},
	}
	client := fake.NewSimpleClientset(&as1, &as2, &as3)

	output := listAutoscaleOutput(t, client, "myns", "")
	t.Log("output is", output)
	if !strings.Contains(output, "foo") || !strings.Contains(output, "bar") {
		t.Errorf("table output didn't mention both autoscales")
	}
	if strings.Contains(output, "foobar") {
		t.Errorf("table output shouldn't mention foobar autoscale as it isn't created by kubeless")
	}

	// json output
	output = listAutoscaleOutput(t, client, "myns", "json")
	t.Log("output is", output)

	// yaml output
	output = listAutoscaleOutput(t, client, "myns", "yaml")
	t.Log("output is", output)
}
// TestGetHorizontalAutoscaleDefinition checks the HPA object built for both
// supported metrics ("cpu" and "qps"): metadata, replica bounds, scale
// target, and the metric spec itself.
func TestGetHorizontalAutoscaleDefinition(t *testing.T) {
	var min, max int32
	min = 1
	max = 3
	funcName := "test-autoscale"
	ns := "default"
	value := "10"
	labels := map[string]string{
		"foo": "bar",
	}

	// cpu: value is interpreted as a target average utilization percentage.
	metric := "cpu"
	hpa, err := getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, labels)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	expectedMeta := metav1.ObjectMeta{
		Name:      funcName,
		Namespace: ns,
		Labels:    labels,
	}
	if hpa.Spec.ScaleTargetRef.Name != funcName {
		t.Fatalf("Creating wrong scale target name")
	}
	if !reflect.DeepEqual(expectedMeta, hpa.ObjectMeta) {
		t.Errorf("Expected \n%v to be equal to \n%v", expectedMeta, hpa.ObjectMeta)
	}
	if *hpa.Spec.MinReplicas != min {
		t.Errorf("Unexpected min replicas. Expecting %d got %d", min, *hpa.Spec.MinReplicas)
	}
	if hpa.Spec.MaxReplicas != max {
		t.Errorf("Unexpected max replicas. Expecting %d got %d", max, hpa.Spec.MaxReplicas)
	}
	if hpa.Spec.Metrics[0].Type != v2beta1.ResourceMetricSourceType || *hpa.Spec.Metrics[0].Resource.TargetAverageUtilization != int32(10) {
		t.Error("Unexpected metric")
	}

	// qps: value is parsed as a resource.Quantity for the object metric.
	metric = "qps"
	hpa, err = getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, labels)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	if hpa.Spec.Metrics[0].Type != v2beta1.ObjectMetricSourceType || hpa.Spec.Metrics[0].Object.TargetValue.String() != "10" {
		t.Error("Unexpected metric")
	}
}
// CompletionCmd contains first-class command for completion.
// It emits a shell-completion script for the requested shell on stdout.
// NOTE(review): line breaks inside the help string below were reconstructed
// from a flattened source — confirm against the original file.
var CompletionCmd = &cobra.Command{
	Use:   "completion [bash|zsh|fish|powershell]",
	Short: "Generate completion script",
	Long: `To load completions:

Bash:

$ source <(kubeless completion bash)

# To load completions for each session, execute once:
Linux:
  $ kubeless completion bash > /etc/bash_completion.d/kubeless
MacOS:
  $ kubeless completion bash > /usr/local/etc/bash_completion.d/kubeless

Zsh:

# If shell completion is not already enabled in your environment you will need
# to enable it.  You can execute the following once:

$ echo "autoload -U compinit; compinit" >> ~/.zshrc

# To load completions for each session, execute once:
$ kubeless completion zsh > "${fpath[1]}/_kubeless"

# You will need to start a new shell for this setup to take effect.

Fish:

$ kubeless completion fish | source

# To load completions for each session, execute once:
$ kubeless completion fish > ~/.config/fish/completions/kubeless.fish
`,
	DisableFlagsInUseLine: true,
	ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
	Args:                  cobra.ExactValidArgs(1),
	// Run dispatches to the matching cobra generator on os.Stdout.
	Run: func(cmd *cobra.Command, args []string) {
		switch args[0] {
		case "bash":
			cmd.Root().GenBashCompletion(os.Stdout)
		case "zsh":
			cmd.Root().GenZshCompletion(os.Stdout)
		case "fish":
			cmd.Root().GenFishCompletion(os.Stdout, true)
		case "powershell":
			cmd.Root().GenPowerShellCompletion(os.Stdout)
		}
	},
}
*/ package function import ( "bytes" "fmt" "strconv" "strings" "time" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" ) var callCmd = &cobra.Command{ Use: "call FLAG", Short: "call function from cli", Long: `call function from cli`, Run: func(cmd *cobra.Command, args []string) { var ( str []byte get bool = false ) if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] data, err := cmd.Flags().GetString("data") if data == "" { get = true } else { str = []byte(data) } if err != nil { logrus.Fatal(err) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } clientset := utils.GetClientOutOfCluster() svc, err := clientset.CoreV1().Services(ns).Get(funcName, metav1.GetOptions{}) if err != nil { logrus.Fatalf("Unable to find the service for %s", funcName) } port := strconv.Itoa(int(svc.Spec.Ports[0].Port)) if svc.Spec.Ports[0].Name != "" { port = svc.Spec.Ports[0].Name } req := &rest.Request{} if get { req = clientset.CoreV1().RESTClient().Get().Namespace(ns).Resource("services").SubResource("proxy").Name(funcName + ":" + port) } else { req = clientset.CoreV1().RESTClient().Post().Namespace(ns).Resource("services").SubResource("proxy").Name(funcName + ":" + port).Body(bytes.NewBuffer(str)) if utils.IsJSON(string(str)) { req.SetHeader("Content-Type", "application/json") req.SetHeader("event-type", "application/json") } else { req.SetHeader("Content-Type", "application/x-www-form-urlencoded") req.SetHeader("event-type", "application/x-www-form-urlencoded") } // REST package removes trailing slash when building URLs // Causing POST requests to be redirected with an empty body // So we need to manually build the URL req = req.AbsPath(req.URL().Path + "/") } timestamp := time.Now().UTC() eventID, err := utils.GetRandString(11) if err != nil 
{ logrus.Fatalf("Unable to generate ID %v", err) } req.SetHeader("event-id", eventID) req.SetHeader("event-time", timestamp.Format(time.RFC3339)) req.SetHeader("event-namespace", "cli.kubeless.io") res, err := req.Do().Raw() if err != nil { // Properly interpret line breaks logrus.Error(string(res)) if strings.Contains(err.Error(), "status code 408") { // Give a more meaninful error for timeout errors logrus.Fatal("Request timeout exceeded") } else { logrus.Fatal(strings.Replace(err.Error(), `\n`, "\n", -1)) } } fmt.Println(string(res)) }, } func init() { callCmd.Flags().StringP("data", "d", "", "Specify data for function") callCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } ================================================ FILE: cmd/kubeless/function/delete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package function import ( "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var deleteCmd = &cobra.Command{ Use: "delete ", Short: "delete a function from Kubeless", Long: `delete a function from Kubeless`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } kubelessClient, err := utils.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } err = utils.DeleteFunctionCustomResource(kubelessClient, funcName, ns) if err != nil { logrus.Fatal(err) } }, } func init() { deleteCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } ================================================ FILE: cmd/kubeless/function/deploy.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package function import ( "encoding/json" "fmt" "strings" "github.com/ghodss/yaml" cronjobApi "github.com/kubeless/cronjob-trigger/pkg/apis/kubeless/v1beta1" cronjobUtils "github.com/kubeless/cronjob-trigger/pkg/utils" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/langruntime" kubelessutil "github.com/kubeless/kubeless/pkg/utils" "github.com/robfig/cron" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var deployCmd = &cobra.Command{ Use: "deploy FLAG", Short: "deploy a function to Kubeless", Long: `deploy a function to Kubeless`, Run: func(cmd *cobra.Command, args []string) { cli := kubelessutil.GetClientOutOfCluster() apiExtensionsClientset := kubelessutil.GetAPIExtensionsClientOutOfCluster() if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] runtime, err := cmd.Flags().GetString("runtime") if err != nil { logrus.Fatal(err) } // Checking runtime parameter if allowed by RBAC, otherwide skip the check config, err := kubelessutil.GetKubelessConfig(cli, apiExtensionsClientset) if config == nil || err != nil { logrus.Warnf("%v. Runtime check is disabled.", err) } else { lr := langruntime.New(config) lr.ReadConfigMap() if runtime != "" && !lr.IsValidRuntime(runtime) { logrus.Fatalf("Invalid runtime: %s. Supported runtimes are: %s", runtime, strings.Join(lr.GetRuntimes(), ", ")) } } schedule, err := cmd.Flags().GetString("schedule") if err != nil { logrus.Fatal(err) } if schedule != "" { if _, err := cron.ParseStandard(schedule); err != nil { logrus.Fatalf("Invalid value for --schedule. 
" + err.Error()) } } labels, err := cmd.Flags().GetStringSlice("label") if err != nil { logrus.Fatal(err) } envs, err := cmd.Flags().GetStringSlice("env") if err != nil { logrus.Fatal(err) } handler, err := cmd.Flags().GetString("handler") if err != nil { logrus.Fatal(err) } file, err := cmd.Flags().GetString("from-file") if err != nil { logrus.Fatal(err) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } var nsArg string if ns == "" { ns = kubelessutil.GetDefaultNamespace() } else { nsArg = fmt.Sprintf(" -n %s", ns) } deps, err := cmd.Flags().GetString("dependencies") if err != nil { logrus.Fatal(err) } secrets, err := cmd.Flags().GetStringSlice("secrets") if err != nil { logrus.Fatal(err) } serviceAccount, err := cmd.Flags().GetString("service-account") if err != nil { logrus.Fatal(err) } runtimeImage, err := cmd.Flags().GetString("runtime-image") if err != nil { logrus.Fatal(err) } imagePullPolicy, err := cmd.Flags().GetString("image-pull-policy") if err != nil { logrus.Fatal(err) } if imagePullPolicy != "IfNotPresent" && imagePullPolicy != "Always" && imagePullPolicy != "Never" { err := fmt.Errorf("image-pull-policy must be {IfNotPresent|Always|Never}") logrus.Fatal(err) } mem, err := cmd.Flags().GetString("memory") if err != nil { logrus.Fatal(err) } cpu, err := cmd.Flags().GetString("cpu") if err != nil { logrus.Fatal(err) } timeout, err := cmd.Flags().GetString("timeout") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } headless, err := cmd.Flags().GetBool("headless") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } port, err := cmd.Flags().GetInt32("port") if err != nil { logrus.Fatal(err) } if port <= 0 || port > 65535 { logrus.Fatalf("Invalid port number %d specified", port) } servicePort, err := cmd.Flags().GetInt32("servicePort") if err != nil { logrus.Fatal(err) } if servicePort < 0 
|| servicePort > 65535 { logrus.Fatalf("Invalid servicePort number %d specified", servicePort) } funcDeps := "" if deps != "" { contentType, err := kubelessutil.GetContentType(deps) if err != nil { logrus.Fatal(err) } funcDeps, _, err = kubelessutil.ParseContent(deps, contentType) if err != nil { logrus.Fatal(err) } } if runtime == "" && runtimeImage == "" { logrus.Fatal("Either `--runtime` or `--runtime-image` flag must be specified.") } if runtime != "" && handler == "" { logrus.Fatal("You must specify handler for the runtime.") } nodeSelectors, err := cmd.Flags().GetStringSlice("node-selectors") if err != nil { logrus.Fatal(err) } defaultFunctionSpec := kubelessApi.Function{} defaultFunctionSpec.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", "function": funcName, } f, err := getFunctionDescription(funcName, ns, handler, file, funcDeps, runtime, runtimeImage, mem, cpu, timeout, imagePullPolicy, serviceAccount, port, servicePort, headless, envs, labels, secrets, nodeSelectors, defaultFunctionSpec) if err != nil { logrus.Fatal(err) } if dryrun == true { if output == "json" { j, err := json.MarshalIndent(f, "", " ") if err != nil { logrus.Fatal(err) } fmt.Println(string(j[:])) return } else if output == "yaml" { y, err := yaml.Marshal(f) if err != nil { logrus.Fatal(err) } fmt.Println(string(y[:])) return } else { logrus.Infof("Output format needs to be yaml or json") return } } kubelessClient, err := kubelessutil.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } logrus.Infof("Deploying function...") err = kubelessutil.CreateFunctionCustomResource(kubelessClient, f) if err != nil { logrus.Fatalf("Failed to deploy %s. 
Received:\n%s", funcName, err) } logrus.Infof("Function %s submitted for deployment", funcName) logrus.Infof("Check the deployment status executing 'kubeless function ls %s%s'", funcName, nsArg) if schedule != "" { cronJobTrigger := cronjobApi.CronJobTrigger{} cronJobTrigger.TypeMeta = metav1.TypeMeta{ Kind: "CronJobTrigger", APIVersion: "kubeless.io/v1beta1", } cronJobTrigger.ObjectMeta = metav1.ObjectMeta{ Name: funcName, Namespace: ns, } cronJobTrigger.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", "function": funcName, } cronJobTrigger.Spec.FunctionName = funcName cronJobTrigger.Spec.Schedule = schedule cronjobClient, err := cronjobUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } err = cronjobUtils.CreateCronJobCustomResource(cronjobClient, &cronJobTrigger) if err != nil { logrus.Fatalf("Failed to deploy cron job trigger %s. Received:\n%s", funcName, err) } } }, } func init() { deployCmd.Flags().StringP("runtime", "r", "", "Specify runtime") deployCmd.Flags().StringP("handler", "", "", "Specify handler") deployCmd.Flags().StringP("from-file", "f", "", "Specify code file or a URL to the code file") deployCmd.Flags().StringSliceP("label", "l", []string{}, "Specify labels of the function. Both separator ':' and '=' are allowed. For example: --label foo1=bar1,foo2:bar2") deployCmd.Flags().StringSliceP("secrets", "", []string{}, "Specify Secrets to be mounted to the functions container. For example: --secrets mySecret") deployCmd.Flags().StringSliceP("env", "e", []string{}, "Specify environment variable of the function. Both separator ':' and '=' are allowed. For example: --env foo1=bar1,foo2:bar2") deployCmd.Flags().StringSliceP("node-selectors", "", []string{}, "Specify node selectors for the function. Both separator ':' and '=' are allowed. For example: --node-selectors key1=val1,key2:val2") deployCmd.Flags().StringP("service-account", "", "", "Specify service account for the function. 
For example: --service-account controller-acct") deployCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") deployCmd.Flags().StringP("dependencies", "d", "", "Specify a file containing list of dependencies for the function") deployCmd.Flags().StringP("schedule", "", "", "Specify schedule in cron format for scheduled function") deployCmd.Flags().StringP("memory", "", "", "Request amount of memory, which is measured in bytes, for the function. It is expressed as a plain integer or a fixed-point interger with one of these suffies: E, P, T, G, M, K, Ei, Pi, Ti, Gi, Mi, Ki") deployCmd.Flags().StringP("cpu", "", "", "Request amount of cpu for the function, which is measured in units of cores. Please see the following link for more information: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu") deployCmd.Flags().StringP("runtime-image", "", "", "Custom runtime image") deployCmd.Flags().StringP("image-pull-policy", "", "Always", "Image pull policy") deployCmd.Flags().StringP("timeout", "", "180", "Maximum timeout (in seconds) for the function to complete its execution") deployCmd.Flags().StringP("output", "o", "yaml", "Output format") deployCmd.Flags().Bool("headless", false, "Deploy http-based function without a single service IP and load balancing support from Kubernetes. See: https://kubernetes.io/docs/concepts/services-networking/service/#headless-services") deployCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") deployCmd.Flags().Int32("port", 8080, "Deploy http-based function with a custom port") deployCmd.Flags().Int32("servicePort", 0, "Deploy http-based function with a custom service port. 
If not provided the value of 'port' will be used") } ================================================ FILE: cmd/kubeless/function/describe.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package function import ( "encoding/json" "fmt" "github.com/ghodss/yaml" "github.com/gosuri/uitable" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var describeCmd = &cobra.Command{ Use: "describe FLAG", Aliases: []string{"ls"}, Short: "describe a function deployed to Kubeless", Long: `describe a function deployed to Kubeless`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatalf("Can not describe function: %v", err) } if ns == "" { ns = utils.GetDefaultNamespace() } output, err := cmd.Flags().GetString("out") if err != nil { logrus.Fatalf("Can not describe function: %v", err) } f, err := utils.GetFunction(funcName, ns) if err != nil { logrus.Fatalf("Can not describe function: %v", err) } err = print(f, funcName, output) if err != nil { logrus.Fatalf("Can not describe function: %v", err) } }, } func init() { describeCmd.Flags().StringP("out", "o", "", "Output format. 
One of: json|yaml") describeCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } func print(f kubelessApi.Function, name, output string) error { switch output { case "": table := uitable.New() table.MaxColWidth = 80 table.Wrap = true label, err := json.Marshal(f.ObjectMeta.Labels) if err != nil { return err } var env, memory string if len(f.Spec.Deployment.Spec.Template.Spec.Containers) != 0 { b, err := json.Marshal(f.Spec.Deployment.Spec.Template.Spec.Containers[0].Env) if err != nil { return err } env = string(b) memory = f.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String() } table.AddRow("Name:", name) table.AddRow("Namespace:", f.ObjectMeta.Namespace) table.AddRow("Handler:", f.Spec.Handler) table.AddRow("Runtime:", f.Spec.Runtime) table.AddRow("Label:", string(label)) table.AddRow("Envvar:", env) table.AddRow("Memory:", memory) table.AddRow("Dependencies:", f.Spec.Deps) fmt.Println(table) case "json": b, err := json.MarshalIndent(f, "", " ") if err != nil { return err } fmt.Println(string(b)) case "yaml": b, err := yaml.Marshal(f) if err != nil { return err } fmt.Println(string(b)) default: fmt.Println("Wrong output format. Please use only json|yaml") } return nil } ================================================ FILE: cmd/kubeless/function/function.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

package function

import (
	"fmt"
	"strings"

	kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1"
	"github.com/kubeless/kubeless/pkg/client/clientset/versioned"
	kubelessutil "github.com/kubeless/kubeless/pkg/utils"
	"github.com/spf13/cobra"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

// FunctionCmd contains first-class command for function
var FunctionCmd = &cobra.Command{
	Use:   "function SUBCOMMAND",
	Short: "function specific operations",
	Long:  `function command allows user to list, deploy, edit, delete functions running on Kubeless`,
	Run: func(cmd *cobra.Command, args []string) {
		// No subcommand given: just print the usage text.
		cmd.Help()
	},
}

func init() {
	FunctionCmd.AddCommand(deployCmd)
	FunctionCmd.AddCommand(deleteCmd)
	FunctionCmd.AddCommand(listCmd)
	FunctionCmd.AddCommand(callCmd)
	FunctionCmd.AddCommand(logsCmd)
	FunctionCmd.AddCommand(describeCmd)
	FunctionCmd.AddCommand(updateCmd)
	FunctionCmd.AddCommand(topCmd)
}

// getKV splits input at the FIRST '=' or ':' into a (key, value) pair.
// When no separator is present the whole input becomes the key and the
// value is empty. The value may itself contain further separators.
func getKV(input string) (string, string) {
	var key, value string
	if pos := strings.IndexAny(input, "=:"); pos != -1 {
		key = input[:pos]
		value = input[pos+1:]
	} else {
		// no separator found
		key = input
		value = ""
	}
	return key, value
}

// parseLabel converts "k=v"/"k:v" strings into a label map.
// Duplicate keys: the last occurrence wins.
func parseLabel(labels []string) map[string]string {
	funcLabels := make(map[string]string)
	for _, label := range labels {
		k, v := getKV(label)
		funcLabels[k] = v
	}
	return funcLabels
}

// parseEnv converts "k=v"/"k:v" strings into EnvVar entries,
// preserving order and duplicates (unlike parseLabel).
func parseEnv(envs []string) []v1.EnvVar {
	funcEnv := []v1.EnvVar{}
	for _, env := range envs {
		k, v := getKV(env)
		funcEnv = append(funcEnv, v1.EnvVar{
			Name:  k,
			Value: v,
		})
	}
	return funcEnv
}

// parseResource parses a Kubernetes quantity string (e.g. "128Mi", "100m").
// An empty string yields the zero Quantity with no error.
func parseResource(in string) (resource.Quantity, error) {
	if in == "" {
		return resource.Quantity{}, nil
	}
	quantity, err := resource.ParseQuantity(in)
	if err != nil {
		return resource.Quantity{}, err
	}
	return quantity, nil
}

// parseNodeSelectors converts "k=v"/"k:v" strings into a node-selector map.
func parseNodeSelectors(nodeSelectors []string) map[string]string {
	funcNodeSelectors := make(map[string]string)
	for _, nodeSelector := range nodeSelectors {
		k, v := getKV(nodeSelector)
		funcNodeSelectors[k] = v
	}
	return funcNodeSelectors
}

// getFunctionDescription builds a Function custom resource from CLI inputs,
// layered on top of defaultFunction: non-empty/non-zero CLI values override
// the default; otherwise the corresponding field of defaultFunction is kept
// (env, resources, image, volume mounts, node selectors, labels).
// Labels from the CLI are merged INTO the default labels rather than
// replacing them. For each secret name a volume + mount at "/<secret>" is
// appended after any default mounts.
func getFunctionDescription(funcName, ns, handler, file, deps, runtime, runtimeImage, mem, cpu, timeout string, imagePullPolicy string, serviceAccount string, port int32, servicePort int32, headless bool, envs, labels, secrets, nodeSelectors []string, defaultFunction kubelessApi.Function) (*kubelessApi.Function, error) {
	function := defaultFunction
	function.TypeMeta = metav1.TypeMeta{
		Kind:       "Function",
		APIVersion: "kubeless.io/v1beta1",
	}
	if handler != "" {
		function.Spec.Handler = handler
	}
	if file != "" {
		contentType, err := kubelessutil.GetContentType(file)
		if err != nil {
			return nil, err
		}
		functionContent, checksum, err := kubelessutil.ParseContent(file, contentType)
		if err != nil {
			return nil, err
		}
		if strings.Contains(contentType, "url") {
			// set the function to be the URL provided on the command line
			function.Spec.Function = file
		} else {
			function.Spec.Function = functionContent
		}
		function.Spec.Checksum = checksum
		function.Spec.FunctionContentType = contentType
	}
	if deps != "" {
		function.Spec.Deps = deps
	}
	if runtime != "" {
		function.Spec.Runtime = runtime
	}
	if timeout != "" {
		function.Spec.Timeout = timeout
	}
	// CLI env vars take precedence; otherwise inherit the default container's.
	funcEnv := parseEnv(envs)
	if len(funcEnv) == 0 && len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {
		funcEnv = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].Env
	}
	funcLabels := defaultFunction.ObjectMeta.Labels
	if len(funcLabels) == 0 {
		funcLabels = make(map[string]string)
	}
	ls := parseLabel(labels)
	for k, v := range ls {
		funcLabels[k] = v
	}
	function.ObjectMeta = metav1.ObjectMeta{
		Name:      funcName,
		Namespace: ns,
		Labels:    funcLabels,
	}
	resources := v1.ResourceRequirements{}
	if mem != "" || cpu != "" {
		// NOTE(review): if only one of mem/cpu is given, the other parses ""
		// into a zero Quantity, which still lands in Limits/Requests — confirm
		// a zero limit for the omitted resource is the intended behavior.
		funcMem, err := parseResource(mem)
		if err != nil {
			err = fmt.Errorf("Wrong format of the memory value: %v", err)
			return &kubelessApi.Function{}, err
		}
		funcCPU, err := parseResource(cpu)
		if err != nil {
			err = fmt.Errorf("Wrong format for cpu value: %v", err)
			return &kubelessApi.Function{}, err
		}
		// Same map is used for both Limits and Requests.
		resource := map[v1.ResourceName]resource.Quantity{
			v1.ResourceMemory: funcMem,
			v1.ResourceCPU:    funcCPU,
		}
		resources = v1.ResourceRequirements{
			Limits:   resource,
			Requests: resource,
		}
	} else {
		if len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {
			resources = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources
		}
	}
	if len(runtimeImage) == 0 && len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {
		runtimeImage = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].Image
	}
	// The deployment template always ends up with exactly one container.
	function.Spec.Deployment.Spec.Template.Spec.Containers = []v1.Container{
		{
			ImagePullPolicy: v1.PullPolicy(imagePullPolicy),
			Env:             funcEnv,
			Resources:       resources,
			Image:           runtimeImage,
		},
	}
	if serviceAccount != "" {
		function.Spec.Deployment.Spec.Template.Spec.ServiceAccountName = serviceAccount
	}
	// Carry over default volume mounts before appending the secret mounts below.
	if len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {
		function.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts
	}
	svcSpec := v1.ServiceSpec{
		Ports: []v1.ServicePort{
			{
				Name:     "http-function-port",
				NodePort: 0,
				Protocol: v1.ProtocolTCP,
			},
		},
		Selector: funcLabels,
		Type:     v1.ServiceTypeClusterIP,
	}
	if headless {
		svcSpec.ClusterIP = v1.ClusterIPNone
	}
	if port != 0 {
		svcSpec.Ports[0].Port = port
		svcSpec.Ports[0].TargetPort = intstr.FromInt(int(port))
	}
	// NOTE(review): a non-zero servicePort overrides Port but leaves TargetPort
	// at the --port value — confirm that asymmetry is intended.
	if servicePort != 0 {
		svcSpec.Ports[0].Port = servicePort
	}
	function.Spec.ServiceSpec = svcSpec
	for _, secret := range secrets {
		function.Spec.Deployment.Spec.Template.Spec.Volumes = append(function.Spec.Deployment.Spec.Template.Spec.Volumes, v1.Volume{
			Name: secret + "-vol",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: secret,
				},
			},
		})
		function.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(function.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
			Name:      secret + "-vol",
			MountPath: "/" + secret,
		})
	}
	funcNodeSelectors := parseNodeSelectors(nodeSelectors)
	if len(funcNodeSelectors) == 0 && len(defaultFunction.Spec.Deployment.Spec.Template.Spec.NodeSelector) != 0 {
		funcNodeSelectors = defaultFunction.Spec.Deployment.Spec.Template.Spec.NodeSelector
	}
	function.Spec.Deployment.Spec.Template.Spec.NodeSelector = funcNodeSelectors
	return &function, nil
}

// getDeploymentStatus returns "ready/total READY|NOT READY" for the
// deployment named after the function in namespace ns.
func getDeploymentStatus(cli kubernetes.Interface, funcName, ns string) (string, error) {
	dpm, err := cli.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	status := fmt.Sprintf("%d/%d", dpm.Status.ReadyReplicas, dpm.Status.Replicas)
	if dpm.Status.ReadyReplicas > 0 {
		status += " READY"
	} else {
		status += " NOT READY"
	}
	return status, nil
}

// getFunctions lists all functions in namespace when functionName is empty,
// otherwise fetches that single function (returned as a one-element slice).
func getFunctions(kubelessClient versioned.Interface, namespace, functionName string) ([]*kubelessApi.Function, error) {
	if functionName == "" {
		f, err := kubelessClient.KubelessV1beta1().Functions(namespace).List(metav1.ListOptions{})
		if err != nil {
			return []*kubelessApi.Function{}, err
		}
		return f.Items, nil
	}
	f, err := kubelessClient.KubelessV1beta1().Functions(namespace).Get(functionName, metav1.GetOptions{})
	if err != nil {
		return []*kubelessApi.Function{}, err
	}
	return []*kubelessApi.Function{
		f,
	}, nil
}

================================================
FILE: cmd/kubeless/function/function_test.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ package function import ( "archive/tar" "archive/zip" "compress/gzip" "crypto/sha256" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "reflect" "testing" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" appsv1 "k8s.io/api/apps/v1" "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) func TestParseLabel(t *testing.T) { labels := []string{ "foo=bar", "bar:foo", "foobar", } expected := map[string]string{ "foo": "bar", "bar": "foo", "foobar": "", } actual := parseLabel(labels) if eq := reflect.DeepEqual(expected, actual); !eq { t.Errorf("Expect %v got %v", expected, actual) } } func TestParseEnv(t *testing.T) { envs := []string{ "foo=bar", "bar:foo", "foobar", "foo=bar=baz", "qux=bar,baz", } expected := []v1.EnvVar{ { Name: "foo", Value: "bar", }, { Name: "bar", Value: "foo", }, { Name: "foobar", Value: "", }, { Name: "foo", Value: "bar=baz", }, { Name: "qux", Value: "bar,baz", }, } actual := parseEnv(envs) if eq := reflect.DeepEqual(expected, actual); !eq { t.Errorf("Expect %v got %v", expected, actual) } } func TestParseNodeSelectors(t *testing.T) { nodeSelectors := []string{ "foo=bar", "baz:qux", } expected := map[string]string{ "foo": "bar", "baz": "qux", } actual := parseNodeSelectors(nodeSelectors) if eq := reflect.DeepEqual(expected, actual); !eq { t.Errorf("Expect %v got %v", expected, actual) } } func TestGetFunctionDescription(t *testing.T) { // It should parse the given values file, err := ioutil.TempFile("", "test") if err != nil { t.Error(err) } _, err = file.WriteString("function") if err != nil { t.Error(err) } file.Close() defer os.Remove(file.Name()) // clean up result, err := getFunctionDescription("test", "default", "file.handler", file.Name(), "dependencies", "runtime", 
"test-image", "128Mi", "", "10", "Always", "serviceAccount", 8080, 0, false, []string{"TEST=1"}, []string{"test=1"}, []string{"secretName"}, []string{"foo1=bar1", "baz1:qux1"}, kubelessApi.Function{}) if err != nil { t.Error(err) } parsedMem, _ := parseResource("128Mi") parsedCPU, _ := parseResource("") expectedFunction := kubelessApi.Function{ TypeMeta: metav1.TypeMeta{ Kind: "Function", APIVersion: "kubeless.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", Labels: map[string]string{ "test": "1", }, }, Spec: kubelessApi.FunctionSpec{ Handler: "file.handler", Runtime: "runtime", Function: "function", Checksum: "sha256:78f9ac018e554365069108352dacabb7fbd15246edf19400677e3b54fe24e126", FunctionContentType: "text", Deps: "dependencies", Timeout: "10", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ ServiceAccountName: "serviceAccount", Containers: []v1.Container{ { Env: []v1.EnvVar{{ Name: "TEST", Value: "1", }}, Resources: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: parsedMem, v1.ResourceCPU: parsedCPU, }, Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: parsedMem, v1.ResourceCPU: parsedCPU, }, }, Image: "test-image", ImagePullPolicy: v1.PullAlways, VolumeMounts: []v1.VolumeMount{ { Name: "secretName-vol", MountPath: "/secretName", }, }, }, }, Volumes: []v1.Volume{ { Name: "secretName-vol", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: "secretName", }, }, }, }, NodeSelector: map[string]string{ "foo1": "bar1", "baz1": "qux1", }, }, }, }, }, ServiceSpec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Name: "http-function-port", Protocol: "TCP", Port: 8080, TargetPort: intstr.FromInt(8080)}, }, Selector: map[string]string{ "test": "1", }, Type: v1.ServiceTypeClusterIP, }, }, } if !reflect.DeepEqual(expectedFunction, *result) { t.Errorf("Unexpected result. 
Expecting:\n %+v\nReceived:\n %+v", expectedFunction, *result) } // It should take the default values result2, err := getFunctionDescription("test", "default", "", "", "", "", "", "", "", "", "Always", "", 8080, 0, false, []string{}, []string{}, []string{}, []string{}, expectedFunction) if err != nil { t.Error(err) } if !reflect.DeepEqual(expectedFunction, *result2) { t.Errorf("Unexpected result. Expecting:\n %+v\n Received %+v\n", expectedFunction, *result2) } // Given parameters should take precedence from default values file, err = ioutil.TempFile("", "test") if err != nil { t.Error(err) } _, err = file.WriteString("function-modified") if err != nil { t.Error(err) } file.Close() defer os.Remove(file.Name()) // clean up result3, err := getFunctionDescription("test", "default", "file.handler2", file.Name(), "dependencies2", "runtime2", "test-image2", "256Mi", "100m", "20", "Always", "NewServiceAccount", 8080, 0, false, []string{"TEST=2"}, []string{"test=2"}, []string{"secret2"}, []string{"foo2=bar2", "baz2:qux2"}, expectedFunction) if err != nil { t.Error(err) } parsedMem2, _ := parseResource("256Mi") parsedCPU2, _ := parseResource("100m") newFunction := kubelessApi.Function{ TypeMeta: metav1.TypeMeta{ Kind: "Function", APIVersion: "kubeless.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", Labels: map[string]string{ "test": "2", }, }, Spec: kubelessApi.FunctionSpec{ Handler: "file.handler2", Runtime: "runtime2", Function: "function-modified", FunctionContentType: "text", Checksum: "sha256:1958eb96d7d3cadedd0f327f09322eb7db296afb282ed91aa66cb4ab0dcc3c9f", Deps: "dependencies2", Timeout: "20", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ ServiceAccountName: "NewServiceAccount", Containers: []v1.Container{ { Env: []v1.EnvVar{{ Name: "TEST", Value: "2", }}, Resources: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: parsedMem2, 
v1.ResourceCPU: parsedCPU2, }, Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: parsedMem2, v1.ResourceCPU: parsedCPU2, }, }, Image: "test-image2", ImagePullPolicy: v1.PullAlways, VolumeMounts: []v1.VolumeMount{ { Name: "secretName-vol", MountPath: "/secretName", }, { Name: "secret2-vol", MountPath: "/secret2", }, }, }, }, Volumes: []v1.Volume{ { Name: "secretName-vol", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: "secretName", }, }, }, { Name: "secret2-vol", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: "secret2", }, }, }, }, NodeSelector: map[string]string{ "foo2": "bar2", "baz2": "qux2", }, }, }, }, }, ServiceSpec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Name: "http-function-port", Protocol: "TCP", Port: 8080, TargetPort: intstr.FromInt(8080)}, }, Selector: map[string]string{ "test": "2", }, Type: v1.ServiceTypeClusterIP, }, }, } if !reflect.DeepEqual(newFunction, *result3) { t.Errorf("Unexpected result. Expecting:\n %+v\n Received %+v\n", newFunction, *result3) } // It should detect that it is a Zip file or a compressed tar file file, err = os.Open(file.Name()) if err != nil { t.Error(err) } zipFile, err := os.Create(file.Name() + ".zip") if err != nil { t.Error(err) } defer os.Remove(zipFile.Name()) // clean up tarGzFile, err := os.Create(file.Name() + ".tar.gz") if err != nil { t.Error(err) } defer os.Remove(tarGzFile.Name()) // clean up zipW := zip.NewWriter(zipFile) gzipW := gzip.NewWriter(tarGzFile) tarW := tar.NewWriter(gzipW) info, err := file.Stat() if err != nil { t.Error(err) } zipHeader, err := zip.FileInfoHeader(info) if err != nil { t.Error(err) } writer, err := zipW.CreateHeader(zipHeader) if err != nil { t.Error(err) } _, err = io.Copy(writer, file) if err != nil { t.Error(err) } tarHeader, err := tar.FileInfoHeader(info, info.Name()) if err != nil { t.Error(err) } tarHeader.Name = file.Name() err = tarW.WriteHeader(tarHeader) if err != nil { t.Error(err) } _, err = 
io.Copy(writer, file) if err != nil { t.Error(err) } file.Close() zipW.Close() zipFile.Close() tarW.Close() gzipW.Close() tarGzFile.Close() result4A, err := getFunctionDescription("test", "default", "file.handler", zipFile.Name(), "dependencies", "runtime", "", "", "", "", "Always", "", 8080, 0, false, []string{}, []string{}, []string{}, []string{}, expectedFunction) if err != nil { t.Error(err) } if result4A.Spec.FunctionContentType != "base64+zip" { t.Errorf("Should return base64+zip, received %s", result4A.Spec.FunctionContentType) } result4B, err := getFunctionDescription("test", "default", "file.handler", tarGzFile.Name(), "dependencies", "runtime", "", "", "", "", "Always", "", 8080, 0, false, []string{}, []string{}, []string{}, []string{}, expectedFunction) if err != nil { t.Error(err) } if result4B.Spec.FunctionContentType != "base64+compressedtar" { t.Errorf("Should return base64+compressedtar, received %s", result4B.Spec.FunctionContentType) } // It should maintain previous HPA definition result5, err := getFunctionDescription("test", "default", "file.handler", file.Name(), "dependencies", "runtime", "test-image", "128Mi", "", "10", "Always", "serviceAccount", 8080, 0, false, []string{"TEST=1"}, []string{"test=1"}, []string{}, []string{}, kubelessApi.Function{ Spec: kubelessApi.FunctionSpec{ HorizontalPodAutoscaler: v2beta1.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "previous-hpa", }, }, }, }) if result5.Spec.HorizontalPodAutoscaler.ObjectMeta.Name != "previous-hpa" { t.Error("should maintain previous HPA definition") } // It should set the Port, ServicePort and headless service properly result6, err := getFunctionDescription("test", "default", "file.handler", file.Name(), "dependencies", "runtime", "test-image", "128Mi", "", "", "Always", "serviceAccount", 9091, 9092, true, []string{}, []string{}, []string{}, []string{}, kubelessApi.Function{}) expectedPort := v1.ServicePort{ Name: "http-function-port", Port: 9092, TargetPort: 
intstr.FromInt(9091), NodePort: 0, Protocol: v1.ProtocolTCP, } if !reflect.DeepEqual(result6.Spec.ServiceSpec.Ports[0], expectedPort) { t.Errorf("Unexpected port definition: %v", result6.Spec.ServiceSpec.Ports[0]) } if result6.Spec.ServiceSpec.ClusterIP != v1.ClusterIPNone { t.Errorf("Unexpected clusterIP %v", result6.Spec.ServiceSpec.ClusterIP) } // it should create a function from a URL ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "function") })) defer ts.Close() expectedURLFunction := kubelessApi.Function{ TypeMeta: metav1.TypeMeta{ Kind: "Function", APIVersion: "kubeless.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", Labels: map[string]string{ "test": "1", }, }, Spec: kubelessApi.FunctionSpec{ Handler: "file.handler", Runtime: "runtime", Function: ts.URL, Checksum: "sha256:78f9ac018e554365069108352dacabb7fbd15246edf19400677e3b54fe24e126", FunctionContentType: "url", Deps: "dependencies", Timeout: "10", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ ServiceAccountName: "serviceAccount", Containers: []v1.Container{ { Env: []v1.EnvVar{{ Name: "TEST", Value: "1", }}, Resources: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: parsedMem, v1.ResourceCPU: parsedCPU, }, Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: parsedMem, v1.ResourceCPU: parsedCPU, }, }, Image: "test-image", ImagePullPolicy: v1.PullAlways, VolumeMounts: []v1.VolumeMount{ { Name: "secretName-vol", MountPath: "/secretName", }, }, }, }, Volumes: []v1.Volume{ { Name: "secretName-vol", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: "secretName", }, }, }, }, NodeSelector: map[string]string{ "foo3": "bar3", "baz3": "qux3", }, }, }, }, }, ServiceSpec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Name: "http-function-port", Protocol: "TCP", Port: 8080, TargetPort: 
intstr.FromInt(8080)}, }, Selector: map[string]string{ "test": "1", }, Type: v1.ServiceTypeClusterIP, }, }, } result7, err := getFunctionDescription("test", "default", "file.handler", ts.URL, "dependencies", "runtime", "test-image", "128Mi", "", "10", "Always", "serviceAccount", 8080, 0, false, []string{"TEST=1"}, []string{"test=1"}, []string{"secretName"}, []string{"foo3=bar3", "baz3:qux3"}, kubelessApi.Function{}) if err != nil { t.Error(err) } if !reflect.DeepEqual(expectedURLFunction, *result7) { t.Errorf("Unexpected result. Expecting:\n %+v\nReceived:\n %+v", expectedURLFunction, *result7) } // It should handle zip files and compressed tar files from a URL and detect url+zip and url+compressedtar encoding respectively zipBytes, err := ioutil.ReadFile(zipFile.Name()) if err != nil { t.Error(err) } ts2A := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(zipBytes) })) defer ts2A.Close() expectedURLFunction.Spec.FunctionContentType = "url+zip" expectedURLFunction.Spec.Function = ts2A.URL + "/test.zip" expectedURLFunction.Spec.Checksum, err = getSha256(zipBytes) if err != nil { t.Error(err) } result8A, err := getFunctionDescription("test", "default", "file.handler", ts2A.URL+"/test.zip", "dependencies", "runtime", "test-image", "128Mi", "", "10", "Always", "serviceAccount", 8080, 0, false, []string{"TEST=1"}, []string{"test=1"}, []string{"secretName"}, []string{"foo3=bar3", "baz3:qux3"}, kubelessApi.Function{}) if err != nil { t.Error(err) } if !reflect.DeepEqual(expectedURLFunction, *result8A) { t.Errorf("Unexpected result. 
Expecting:\n %+v\nReceived:\n %+v", expectedURLFunction, *result8A) } tarGzBytes, err := ioutil.ReadFile(tarGzFile.Name()) if err != nil { t.Error(err) } ts2B := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(tarGzBytes) })) defer ts2B.Close() expectedURLFunction.Spec.FunctionContentType = "url+compressedtar" expectedURLFunction.Spec.Function = ts2B.URL + "/test.tar.gz" expectedURLFunction.Spec.Checksum, err = getSha256(tarGzBytes) if err != nil { t.Error(err) } result8B, err := getFunctionDescription("test", "default", "file.handler", ts2B.URL+"/test.tar.gz", "dependencies", "runtime", "test-image", "128Mi", "", "10", "Always", "serviceAccount", 8080, 0, false, []string{"TEST=1"}, []string{"test=1"}, []string{"secretName"}, []string{"foo3=bar3", "baz3:qux3"}, kubelessApi.Function{}) if err != nil { t.Error(err) } if !reflect.DeepEqual(expectedURLFunction, *result8B) { t.Errorf("Unexpected result. Expecting:\n %+v\nReceived:\n %+v", expectedURLFunction, *result8B) } // end test } func getSha256(bytes []byte) (string, error) { h := sha256.New() _, err := h.Write(bytes) if err != nil { return "", err } checksum := hex.EncodeToString(h.Sum(nil)) return "sha256:" + checksum, nil } ================================================ FILE: cmd/kubeless/function/list.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package function import ( "bytes" "encoding/json" "fmt" "io" "strings" "github.com/ghodss/yaml" "github.com/gosuri/uitable" "github.com/sirupsen/logrus" "github.com/spf13/cobra" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/client/clientset/versioned" "github.com/kubeless/kubeless/pkg/utils" ) var listCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all functions deployed to Kubeless", Long: `list all functions deployed to Kubeless`, Run: func(cmd *cobra.Command, args []string) { output, err := cmd.Flags().GetString("out") if err != nil { logrus.Fatal(err.Error()) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err.Error()) } if ns == "" { ns = utils.GetDefaultNamespace() } kubelessClient, err := utils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not list functions: %v", err) } apiV1Client := utils.GetClientOutOfCluster() if err := doList(cmd.OutOrStdout(), kubelessClient, apiV1Client, ns, output, args); err != nil { logrus.Fatal(err.Error()) } }, } func init() { listCmd.Flags().StringP("out", "o", "", "Output format. 
One of: json|yaml") listCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } func doList(w io.Writer, kubelessClient versioned.Interface, apiV1Client kubernetes.Interface, ns, output string, args []string) error { var list []*kubelessApi.Function if len(args) == 0 { funcList, err := kubelessClient.KubelessV1beta1().Functions(ns).List(metav1.ListOptions{}) if err != nil { return err } list = funcList.Items } else { list = make([]*kubelessApi.Function, 0, len(args)) for _, arg := range args { f, err := kubelessClient.KubelessV1beta1().Functions(ns).Get(arg, metav1.GetOptions{}) if err != nil { return fmt.Errorf("Error listing function %s: %v", arg, err) } list = append(list, f) } } return printFunctions(w, list, apiV1Client, output) } func parseDeps(deps, runtime string) (res string, err error) { if deps != "" { if strings.Contains(runtime, "nodejs") { pkgjson := make(map[string]interface{}) err = json.Unmarshal([]byte(deps), &pkgjson) if err != nil { return } if pkgjson["dependencies"] != nil { dependencies := []string{} for pkg, ver := range pkgjson["dependencies"].(map[string]interface{}) { dependencies = append(dependencies, pkg+": "+ver.(string)) } res = strings.Join(dependencies, "\n") } } else { res = deps } } return } // printFunctions formats the output of function list func printFunctions(w io.Writer, functions []*kubelessApi.Function, cli kubernetes.Interface, output string) error { if output == "" { table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "HANDLER", "RUNTIME", "DEPENDENCIES", "STATUS") for _, f := range functions { n := f.ObjectMeta.Name h := f.Spec.Handler r := f.Spec.Runtime ns := f.ObjectMeta.Namespace status, err := getDeploymentStatus(cli, f.ObjectMeta.Name, f.ObjectMeta.Namespace) if err != nil && k8sErrors.IsNotFound(err) { status = "MISSING: Check controller logs" } else if err != nil { return err } deps, err := parseDeps(f.Spec.Deps, r) if err != nil { return 
err } table.AddRow(n, ns, h, r, deps, status) } fmt.Fprintln(w, table) } else if output == "wide" { table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "HANDLER", "RUNTIME", "TYPE", "TOPIC", "DEPENDENCIES", "STATUS", "MEMORY", "ENV", "LABEL", "SCHEDULE") for _, f := range functions { n := f.ObjectMeta.Name h := f.Spec.Handler r := f.Spec.Runtime deps, err := parseDeps(f.Spec.Deps, r) if err != nil { return err } ns := f.ObjectMeta.Namespace status, err := getDeploymentStatus(cli, f.ObjectMeta.Name, f.ObjectMeta.Namespace) if err != nil && k8sErrors.IsNotFound(err) { status = "MISSING: Check controller logs" } else if err != nil { return err } mem := "" env := "" if len(f.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources.Requests) != 0 { mem = f.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String() } if len(f.Spec.Deployment.Spec.Template.Spec.Containers[0].Env) != 0 { var buffer bytes.Buffer for _, e := range f.Spec.Deployment.Spec.Template.Spec.Containers[0].Env { buffer.WriteString(e.Name + " = " + e.Value + "\n") } env = buffer.String() } label := "" if len(f.ObjectMeta.Labels) > 0 { var buffer bytes.Buffer for k, v := range f.ObjectMeta.Labels { buffer.WriteString(k + " : " + v + "\n") } label = buffer.String() } table.AddRow(n, ns, h, r, deps, status, mem, env, label) } fmt.Fprintln(w, table) } else { switch output { case "json": b, err := json.MarshalIndent(functions, "", " ") if err != nil { return err } fmt.Fprintln(w, string(b)) case "yaml": b, err := yaml.Marshal(functions) if err != nil { return err } fmt.Fprintln(w, string(b)) default: return fmt.Errorf("Wrong output format. 
Please use only json|yaml") } } return nil } ================================================ FILE: cmd/kubeless/function/list_test.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package function import ( "bytes" "regexp" "strings" "testing" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/client/clientset/versioned" fFake "github.com/kubeless/kubeless/pkg/client/clientset/versioned/fake" ) func listOutput(t *testing.T, client versioned.Interface, apiV1Client kubernetes.Interface, ns, output string, args []string) string { var buf bytes.Buffer if err := doList(&buf, client, apiV1Client, ns, output, args); err != nil { t.Fatalf("doList returned error: %v", err) } return buf.String() } func TestList(t *testing.T) { funcMem, _ := parseResource("128Mi") listObj := kubelessApi.FunctionList{ Items: []*kubelessApi.Function{ { ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "myns", }, Spec: kubelessApi.FunctionSpec{ Handler: "fhandler", Function: "ffunction", Runtime: "fruntime", Deps: "fdeps", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{{}}, }, }, }, }, }, }, { ObjectMeta: 
metav1.ObjectMeta{ Name: "bar", Namespace: "myns", Labels: map[string]string{ "foo": "bar", }, }, Spec: kubelessApi.FunctionSpec{ Handler: "bhandler", Function: "bfunction", Runtime: "nodejs6", Deps: "{\"dependencies\": {\"test\": \"^1.0.0\"}}", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{ { Env: []v1.EnvVar{ { Name: "foo", Value: "bar", }, { Name: "foo2", Value: "bar2", }, }, Resources: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: funcMem, }, Requests: map[v1.ResourceName]resource.Quantity{ v1.ResourceMemory: funcMem, }, }, }, }, }, }, }, }, }, }, { ObjectMeta: metav1.ObjectMeta{ Name: "wrong", Namespace: "myns", }, Spec: kubelessApi.FunctionSpec{ Handler: "fhandler", Function: "ffunction", Runtime: "fruntime", Deps: "fdeps", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{{}}, }, }, }, }, }, }, }, } client := fFake.NewSimpleClientset(listObj.Items[0], listObj.Items[1], listObj.Items[2]) deploymentFoo := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "myns", }, Status: appsv1.DeploymentStatus{ Replicas: int32(1), ReadyReplicas: int32(1), }, } deploymentBar := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "myns", }, Status: appsv1.DeploymentStatus{ Replicas: int32(2), ReadyReplicas: int32(0), }, } apiV1Client := fake.NewSimpleClientset(&deploymentFoo, &deploymentBar) // No arg -> list everything in namespace output := listOutput(t, client, apiV1Client, "myns", "", []string{}) t.Log("output is", output) if !strings.Contains(output, "foo") || !strings.Contains(output, "bar") { t.Errorf("table output didn't mention both functions") } // Status m, err := regexp.MatchString("foo.*1/1 READY", output) if err != nil { t.Fatal(err) } if !m { t.Errorf("table output didn't mention deployment status") } 
m, err = regexp.MatchString("bar.*0/2 NOT READY", output) if err != nil { t.Fatal(err) } if !m { t.Errorf("table output didn't mention deployment status") } m, err = regexp.MatchString("wrong.*MISSING", output) if err != nil { t.Fatal(err) } if !m { t.Errorf("table output didn't mention deployment status") } // Explicit arg(s) output = listOutput(t, client, apiV1Client, "myns", "", []string{"foo"}) t.Log("output is", output) if !strings.Contains(output, "foo") { t.Errorf("table output didn't mention explicit function foo") } if strings.Contains(output, "bar") { t.Errorf("table output mentions unrequested function bar") } if strings.Contains(output, "test: ^1.0.0") { t.Errorf("table output doesn't show parsed dependencies") } // TODO: Actually validate the output of the following. // Probably need to fix output framing first. // json output output = listOutput(t, client, apiV1Client, "myns", "json", []string{}) t.Log("output is", output) if !strings.Contains(output, "foo") || !strings.Contains(output, "bar") { t.Errorf("table output didn't mention both functions") } // yaml output output = listOutput(t, client, apiV1Client, "myns", "yaml", []string{}) t.Log("output is", output) if !strings.Contains(output, "128Mi") { t.Errorf("table output didn't mention proper memory of function") } // wide output output = listOutput(t, client, apiV1Client, "myns", "wide", []string{}) t.Log("output is", output) if !strings.Contains(output, "foo = bar") { t.Errorf("table output didn't mention proper env of function") } } ================================================ FILE: cmd/kubeless/function/logs.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package function import ( "io" "os" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/api/core/v1" ) var logsCmd = &cobra.Command{ Use: "logs FLAG", Short: "get logs from a running function", Long: `get logs from a running function`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] follow, err := cmd.Flags().GetBool("follow") if err != nil { logrus.Fatal(err) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } k8sClient := utils.GetClientOutOfCluster() if err != nil { logrus.Fatalf("Getting log failed: %v", err) } pods, err := utils.GetPodsByLabel(k8sClient, ns, "function", funcName) if err != nil { logrus.Fatalf("Can't find the function pod: %v", err) } readyPod, err := utils.GetReadyPod(pods) if err != nil { logrus.Fatalf("No function pod is running: %v", err) } podLog := &v1.PodLogOptions{ Container: funcName, Follow: follow, } req := k8sClient.Core().Pods(ns).GetLogs(readyPod.Name, podLog) readCloser, err := req.Stream() if err != nil { logrus.Fatalf("Getting log failed: %v", err) } defer readCloser.Close() io.Copy(os.Stdout, readCloser) }, } func init() { logsCmd.Flags().BoolP("follow", "f", false, "Specify if the logs should be streamed.") logsCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } ================================================ FILE: cmd/kubeless/function/top.go 
================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package function import ( "encoding/json" "fmt" "io" "sort" "time" "github.com/ghodss/yaml" "github.com/gosuri/uitable" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/client/clientset/versioned" "github.com/kubeless/kubeless/pkg/utils" ) var topCmd = &cobra.Command{ Use: "top", Aliases: []string{"stats"}, Short: "display function metrics", Long: `display function metrics`, Run: func(cmd *cobra.Command, args []string) { functionName, err := cmd.Flags().GetString("function") if err != nil { logrus.Fatal(err) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } output, err := cmd.Flags().GetString("out") if err != nil { logrus.Fatal(err.Error()) } apiV1Client := utils.GetClientOutOfCluster() kubelessClient, err := utils.GetKubelessClientOutCluster() handler := &utils.PrometheusMetricsHandler{} err = doTop(cmd.OutOrStdout(), kubelessClient, apiV1Client, handler, ns, functionName, output) if err != nil { logrus.Fatal(err.Error()) } }, } func init() { topCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") topCmd.Flags().StringP("function", "f", "", "Specify the function") topCmd.Flags().StringP("out", "o", "", "Output format. 
One of: json|yaml") } func doTop(w io.Writer, kubelessClient versioned.Interface, apiV1Client kubernetes.Interface, handler utils.MetricsRetriever, ns, functionName, output string) error { functions, err := getFunctions(kubelessClient, ns, functionName) if err != nil { return fmt.Errorf("Error listing functions: %v", err) } ch := make(chan []*utils.Metric, len(functions)) for _, f := range functions { go func(f *kubelessApi.Function) { ch <- utils.GetFunctionMetrics(apiV1Client, handler, ns, f.ObjectMeta.Name) }(f) } var metrics []*utils.Metric i := 0 for i < len(functions) { select { case r := <-ch: metrics = append(metrics, r...) i++ // timeout all go routines after 5 seconds to avoid hanging at the cmd line case <-time.After(5 * time.Second): i = len(functions) } } // sort the results - useful when using 'watch kubeless function top' sort.Slice(metrics, func(i, j int) bool { return metrics[i].FunctionName < metrics[j].FunctionName }) return printTop(w, metrics, apiV1Client, output) } func printTop(w io.Writer, metrics []*utils.Metric, cli kubernetes.Interface, output string) error { if output == "" { table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "METHOD", "TOTAL_CALLS", "TOTAL_FAILURES", "TOTAL_DURATION_SECONDS", "AVG_DURATION_SECONDS", "MESSAGE") for _, f := range metrics { if f.Message != "" { table.AddRow(f.FunctionName, f.Namespace, "", "", "", "", "", f.Message) } else { table.AddRow(f.FunctionName, f.Namespace, f.Method, f.TotalCalls, f.TotalFailures, f.TotalDurationSeconds, f.AvgDurationSeconds, "") } } fmt.Fprintln(w, table) } else { switch output { case "json": b, err := json.MarshalIndent(metrics, "", " ") if err != nil { return err } fmt.Fprintln(w, string(b)) case "yaml": b, err := yaml.Marshal(metrics) if err != nil { return err } fmt.Fprintln(w, string(b)) default: return fmt.Errorf("Wrong output format. 
Please use only json|yaml") } } return nil } ================================================ FILE: cmd/kubeless/function/top_test.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package function import ( "bytes" "fmt" "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/client/clientset/versioned" fFake "github.com/kubeless/kubeless/pkg/client/clientset/versioned/fake" "github.com/kubeless/kubeless/pkg/utils" ) type testMetricsHandler struct{} // handler used for testing purposes only // satisfies the MetricsRetriever interface, gets metrics from the test http server (URL to test http server stored in svc.SelfLink field) func (h *testMetricsHandler) GetRawMetrics(apiClient kubernetes.Interface, namespace, functionName string) ([]byte, error) { svc, err := apiClient.CoreV1().Services(namespace).Get(functionName, metav1.GetOptions{}) if err != nil { return []byte{}, err } b, err := http.Get(svc.SelfLink) if err != nil { return nil, err } defer b.Body.Close() return ioutil.ReadAll(b.Body) } func topOutput(t *testing.T, client versioned.Interface, apiV1Client kubernetes.Interface, h utils.MetricsRetriever, ns, 
functionName, output string) string { var buf bytes.Buffer if err := doTop(&buf, client, apiV1Client, h, ns, functionName, output); err != nil { t.Fatalf("doTop returned error: %v", err) } return buf.String() } func TestTop(t *testing.T) { // setup test server to serve the /metrics endpoint ts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, `# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 1.6846e-05 go_gc_duration_seconds{quantile="0.25"} 3.9124e-05 go_gc_duration_seconds{quantile="0.5"} 0.000147183 go_gc_duration_seconds{quantile="0.75"} 0.000958419 go_gc_duration_seconds{quantile="1"} 0.00796035 go_gc_duration_seconds_sum 2.50781303 go_gc_duration_seconds_count 3424 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 7 # HELP go_info Information about the Go environment. # TYPE go_info gauge go_info{version="go1.10.2"} 1 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge go_memstats_alloc_bytes 2.28336e+06 # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter go_memstats_alloc_bytes_total 9.9682544e+09 # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge go_memstats_buck_hash_sys_bytes 1.500081e+06 # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter go_memstats_frees_total 1.2698678e+07 # HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. # TYPE go_memstats_gc_cpu_fraction gauge go_memstats_gc_cpu_fraction 0.0001214506861340198 # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. 
# TYPE go_memstats_gc_sys_bytes gauge go_memstats_gc_sys_bytes 405504 # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge go_memstats_heap_alloc_bytes 2.28336e+06 # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. # TYPE go_memstats_heap_idle_bytes gauge go_memstats_heap_idle_bytes 2.6624e+06 # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge go_memstats_heap_inuse_bytes 3.072e+06 # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge go_memstats_heap_objects 6280 # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge go_memstats_heap_released_bytes 0 # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge go_memstats_heap_sys_bytes 5.7344e+06 # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge go_memstats_last_gc_time_seconds 1.528573398809276e+09 # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter go_memstats_lookups_total 88701 # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter go_memstats_mallocs_total 1.2704958e+07 # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge go_memstats_mcache_inuse_bytes 3472 # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge go_memstats_mcache_sys_bytes 16384 # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. 
# TYPE go_memstats_mspan_inuse_bytes gauge go_memstats_mspan_inuse_bytes 25688 # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge go_memstats_mspan_sys_bytes 32768 # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge go_memstats_next_gc_bytes 4.194304e+06 # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge go_memstats_other_sys_bytes 738631 # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge go_memstats_stack_inuse_bytes 557056 # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge go_memstats_stack_sys_bytes 557056 # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge go_memstats_sys_bytes 8.984824e+06 # HELP go_threads Number of OS threads created. # TYPE go_threads gauge go_threads 10 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter process_cpu_seconds_total 25.88 # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge process_max_fds 1.048576e+06 # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge process_open_fds 8 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge process_resident_memory_bytes 1.3942784e+07 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge process_start_time_seconds 1.52853941225e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. 
# TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes 1.57294592e+08 # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. # TYPE promhttp_metric_handler_requests_in_flight gauge promhttp_metric_handler_requests_in_flight 1 # HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. # TYPE promhttp_metric_handler_requests_total counter promhttp_metric_handler_requests_total{code="200"} 10798 promhttp_metric_handler_requests_total{code="500"} 0 promhttp_metric_handler_requests_total{code="503"} 0 `) })) defer ts2.Close() // setup test server to serve the /metrics endpoint ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, `# HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes 815255552.0 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge process_resident_memory_bytes 25001984.0 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge process_start_time_seconds 1528507334.03 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter process_cpu_seconds_total 54.72 # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge process_open_fds 8.0 # HELP process_max_fds Maximum number of open file descriptors. 
# TYPE process_max_fds gauge process_max_fds 1048576.0 # HELP python_info Python platform information # TYPE python_info gauge python_info{implementation="CPython",major="2",minor="7",patchlevel="9",version="2.7.9"} 1.0 # HELP function_failures_total Number of exceptions in user function # TYPE function_failures_total counter function_failures_total{method="GET"} 0.0 function_failures_total{method="POST"} 0.0 # HELP function_calls_total Number of calls to user function # TYPE function_calls_total counter function_calls_total{method="GET"} 254.0 function_calls_total{method="POST"} 296.0 # HELP function_duration_seconds Duration of user function in seconds # TYPE function_duration_seconds histogram function_duration_seconds_bucket{le="0.005",method="GET"} 8.0 function_duration_seconds_bucket{le="0.01",method="GET"} 191.0 function_duration_seconds_bucket{le="0.025",method="GET"} 248.0 function_duration_seconds_bucket{le="0.05",method="GET"} 253.0 function_duration_seconds_bucket{le="0.075",method="GET"} 253.0 function_duration_seconds_bucket{le="0.1",method="GET"} 253.0 function_duration_seconds_bucket{le="0.25",method="GET"} 254.0 function_duration_seconds_bucket{le="0.5",method="GET"} 254.0 function_duration_seconds_bucket{le="0.75",method="GET"} 254.0 function_duration_seconds_bucket{le="1.0",method="GET"} 254.0 function_duration_seconds_bucket{le="2.5",method="GET"} 254.0 function_duration_seconds_bucket{le="5.0",method="GET"} 254.0 function_duration_seconds_bucket{le="7.5",method="GET"} 254.0 function_duration_seconds_bucket{le="10.0",method="GET"} 254.0 function_duration_seconds_bucket{le="+Inf",method="GET"} 254.0 function_duration_seconds_count{method="GET"} 254.0 function_duration_seconds_sum{method="GET"} 2.863368272781372 function_duration_seconds_bucket{le="0.005",method="POST"} 1.0 function_duration_seconds_bucket{le="0.01",method="POST"} 157.0 function_duration_seconds_bucket{le="0.025",method="POST"} 296.0 
function_duration_seconds_bucket{le="0.05",method="POST"} 296.0 function_duration_seconds_bucket{le="0.075",method="POST"} 296.0 function_duration_seconds_bucket{le="0.1",method="POST"} 296.0 function_duration_seconds_bucket{le="0.25",method="POST"} 296.0 function_duration_seconds_bucket{le="0.5",method="POST"} 296.0 function_duration_seconds_bucket{le="0.75",method="POST"} 296.0 function_duration_seconds_bucket{le="1.0",method="POST"} 296.0 function_duration_seconds_bucket{le="2.5",method="POST"} 296.0 function_duration_seconds_bucket{le="5.0",method="POST"} 296.0 function_duration_seconds_bucket{le="7.5",method="POST"} 296.0 function_duration_seconds_bucket{le="10.0",method="POST"} 296.0 function_duration_seconds_bucket{le="+Inf",method="POST"} 296.0 function_duration_seconds_count{method="POST"} 296.0 function_duration_seconds_sum{method="POST"} 3.4116291999816895 `) })) defer ts.Close() function1Name := "pyFunc" function2Name := "goFunc" namespace := "myns" listObj := kubelessApi.FunctionList{ Items: []*kubelessApi.Function{ { ObjectMeta: metav1.ObjectMeta{ Name: function1Name, Namespace: namespace, }, Spec: kubelessApi.FunctionSpec{ Handler: "fhandler", Function: function1Name, Runtime: "pyruntime", Deps: "pydeps", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{{}}, }, }, }, }, }, }, { ObjectMeta: metav1.ObjectMeta{ Name: function2Name, Namespace: namespace, }, Spec: kubelessApi.FunctionSpec{ Handler: "gohandler", Function: function2Name, Runtime: "goruntime", Deps: "godeps", Deployment: appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{{}}, }, }, }, }, }, }, }, } client := fFake.NewSimpleClientset(listObj.Items[0], listObj.Items[1]) deploymentPy := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: function1Name, Namespace: namespace, }, Status: appsv1.DeploymentStatus{ Replicas: int32(1), 
ReadyReplicas: int32(1), }, } deploymentGo := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: function2Name, Namespace: namespace, }, Status: appsv1.DeploymentStatus{ Replicas: int32(1), ReadyReplicas: int32(1), }, } serviceGo := v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: function2Name, Namespace: namespace, SelfLink: ts2.URL, }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ { Name: "p1", Port: int32(8080), TargetPort: intstr.FromInt(8080), NodePort: 0, Protocol: v1.ProtocolTCP, }, }, }, } servicePy := v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: function1Name, Namespace: namespace, SelfLink: ts.URL, }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ { Name: "p1", Port: int32(8080), TargetPort: intstr.FromInt(8080), NodePort: 0, Protocol: v1.ProtocolTCP, }, }, }, } apiV1Client := fake.NewSimpleClientset(&deploymentPy, &servicePy, &deploymentGo, &serviceGo) handler := &testMetricsHandler{} // List multiple functions output := topOutput(t, client, apiV1Client, handler, namespace, "", "") t.Log("output is", output) if !strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) { t.Errorf("table output didn't match FUNCTION or NAMESPACE") } if !strings.Contains(output, "GET") || !strings.Contains(output, "POST") { t.Errorf("table output didn't match on METHOD") } if !strings.Contains(output, "2.86336") || !strings.Contains(output, "3.41162") { t.Errorf("table output didn't match on TOTAL_DURATION_SECONDS") } // verify calculated fields if !strings.Contains(output, "0.0112731") || !strings.Contains(output, "0.0115257") { t.Errorf("table output didn't match on AVG_DURATION_SECONDS") } // Get single function output = topOutput(t, client, apiV1Client, handler, namespace, function2Name, "") t.Log("output is", output) if strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) { t.Errorf("table output didn't match FUNCTION or 
NAMESPACE") } // json output output = topOutput(t, client, apiV1Client, handler, namespace, "", "json") t.Log("output is", output) if !strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) { t.Errorf("table output didn't match FUNCTION or NAMESPACE") } if !strings.Contains(output, "GET") || !strings.Contains(output, "POST") { t.Errorf("table output didn't match on METHOD") } if !strings.Contains(output, "2.86336") || !strings.Contains(output, "3.41162") { t.Errorf("table output didn't match on TOTAL_DURATION_SECONDS") } // verify calculated fields if !strings.Contains(output, "0.0112731") || !strings.Contains(output, "0.0115257") { t.Errorf("table output didn't match on AVG_DURATION_SECONDS") } // yaml output output = topOutput(t, client, apiV1Client, handler, namespace, "", "yaml") t.Log("output is", output) if !strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) { t.Errorf("table output didn't match FUNCTION or NAMESPACE") } if !strings.Contains(output, "GET") || !strings.Contains(output, "POST") { t.Errorf("table output didn't match on METHOD") } if !strings.Contains(output, "2.86336") || !strings.Contains(output, "3.41162") { t.Errorf("table output didn't match on TOTAL_DURATION_SECONDS") } // verify calculated fields if !strings.Contains(output, "0.0112731") || !strings.Contains(output, "0.0115257") { t.Errorf("table output didn't match on AVG_DURATION_SECONDS") } } ================================================ FILE: cmd/kubeless/function/update.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package function import ( "encoding/json" "fmt" "strings" "github.com/ghodss/yaml" "github.com/kubeless/kubeless/pkg/langruntime" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var updateCmd = &cobra.Command{ Use: "update FLAG", Short: "update a function on Kubeless", Long: `update a function on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cli := utils.GetClientOutOfCluster() apiExtensionsClientset := utils.GetAPIExtensionsClientOutOfCluster() config, err := utils.GetKubelessConfig(cli, apiExtensionsClientset) if err != nil { logrus.Fatalf("Unable to read the configmap: %v", err) } var lr = langruntime.New(config) lr.ReadConfigMap() if len(args) != 1 { logrus.Fatal("Need exactly one argument - function name") } funcName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } var nsArg string if ns == "" { ns = utils.GetDefaultNamespace() } else { nsArg = fmt.Sprintf(" -n %s", ns) } handler, err := cmd.Flags().GetString("handler") if err != nil { logrus.Fatal(err) } file, err := cmd.Flags().GetString("from-file") if err != nil { logrus.Fatal(err) } secrets, err := cmd.Flags().GetStringSlice("secrets") if err != nil { logrus.Fatal(err) } serviceAccount, err := cmd.Flags().GetString("service-account") if err != nil { logrus.Fatal(err) } runtime, err := cmd.Flags().GetString("runtime") if err != nil { logrus.Fatal(err) } if runtime != "" && !lr.IsValidRuntime(runtime) { logrus.Fatalf("Invalid runtime: %s. 
Supported runtimes are: %s", runtime, strings.Join(lr.GetRuntimes(), ", ")) } labels, err := cmd.Flags().GetStringSlice("label") if err != nil { logrus.Fatal(err) } envs, err := cmd.Flags().GetStringSlice("env") if err != nil { logrus.Fatal(err) } runtimeImage, err := cmd.Flags().GetString("runtime-image") if err != nil { logrus.Fatal(err) } imagePullPolicy, err := cmd.Flags().GetString("image-pull-policy") if err != nil { logrus.Fatal(err) } if imagePullPolicy != "IfNotPresent" && imagePullPolicy != "Always" && imagePullPolicy != "Never" { err := fmt.Errorf("image-pull-policy must be {IfNotPresent|Always|Never}") logrus.Fatal(err) } mem, err := cmd.Flags().GetString("memory") if err != nil { logrus.Fatal(err) } cpu, err := cmd.Flags().GetString("cpu") if err != nil { logrus.Fatal(err) } timeout, err := cmd.Flags().GetString("timeout") if err != nil { logrus.Fatal(err) } deps, err := cmd.Flags().GetString("dependencies") if err != nil { logrus.Fatal(err) } funcDeps := "" if deps != "" { contentType, err := utils.GetContentType(deps) if err != nil { logrus.Fatal(err) } funcDeps, _, err = utils.ParseContent(deps, contentType) if err != nil { logrus.Fatal(err) } } headless, err := cmd.Flags().GetBool("headless") if err != nil { logrus.Fatal(err) } port, err := cmd.Flags().GetInt32("port") if err != nil { logrus.Fatal(err) } if port <= 0 || port > 65535 { logrus.Fatalf("Invalid port number %d specified", port) } servicePort, err := cmd.Flags().GetInt32("servicePort") if err != nil { logrus.Fatal(err) } if servicePort < 0 || servicePort > 65535 { logrus.Fatalf("Invalid servicePort number %d specified", servicePort) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } nodeSelectors, err := cmd.Flags().GetStringSlice("node-selectors") if err != nil { logrus.Fatal(err) } previousFunction, err := utils.GetFunction(funcName, ns) if err != nil { logrus.Fatal(err) } 
f, err := getFunctionDescription(funcName, ns, handler, file, funcDeps, runtime, runtimeImage, mem, cpu, timeout, imagePullPolicy, serviceAccount, port, servicePort, headless, envs, labels, secrets, nodeSelectors, previousFunction) if err != nil { logrus.Fatal(err) } if dryrun == true { if output == "json" { j, err := json.MarshalIndent(f, "", " ") if err != nil { logrus.Fatal(err) } fmt.Println(string(j[:])) return } else if output == "yaml" { y, err := yaml.Marshal(f) if err != nil { logrus.Fatal(err) } fmt.Println(string(y[:])) return } else { logrus.Infof("Output format needs to be yaml or json") return } } kubelessClient, err := utils.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } logrus.Infof("Redeploying function...") err = utils.PatchFunctionCustomResource(kubelessClient, f) if err != nil { logrus.Fatal(err) } logrus.Infof("Function %s submitted for deployment", funcName) logrus.Infof("Check the deployment status executing 'kubeless function ls %s%s'", funcName, nsArg) }, } func init() { updateCmd.Flags().StringP("runtime", "r", "", "Specify runtime") updateCmd.Flags().StringP("handler", "", "", "Specify handler") updateCmd.Flags().StringP("from-file", "f", "", "Specify code file or a URL to the code file") updateCmd.Flags().StringP("memory", "", "", "Request amount of memory for the function") updateCmd.Flags().StringP("cpu", "", "", "Request amount of cpu for the function.") updateCmd.Flags().StringSliceP("label", "l", []string{}, "Specify labels of the function") updateCmd.Flags().StringSliceP("secrets", "", []string{}, "Specify Secrets to be mounted to the functions container. For example: --secrets mySecret") updateCmd.Flags().StringSliceP("env", "e", []string{}, "Specify environment variable of the function") updateCmd.Flags().StringSliceP("node-selectors", "", []string{}, "Specify node selectors for the function") updateCmd.Flags().StringP("service-account", "", "", "Specify service account for the function. 
For example: --service-account controller-acct") updateCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") updateCmd.Flags().StringP("dependencies", "d", "", "Specify a file containing list of dependencies for the function") updateCmd.Flags().StringP("runtime-image", "", "", "Custom runtime image") updateCmd.Flags().StringP("image-pull-policy", "", "Always", "Image pull policy") updateCmd.Flags().StringP("timeout", "", "180", "Maximum timeout (in seconds) for the function to complete its execution") updateCmd.Flags().Bool("headless", false, "Deploy http-based function without a single service IP and load balancing support from Kubernetes. See: https://kubernetes.io/docs/concepts/services-networking/service/#headless-services") updateCmd.Flags().Int32("port", 8080, "Deploy http-based function with a custom port") updateCmd.Flags().Int32("servicePort", 0, "Deploy http-based function with a custom service port") updateCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") updateCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/getserverconfig/getServerConfig.go ================================================ package getserverconfig import ( "strings" "github.com/kubeless/kubeless/pkg/langruntime" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) // GetServerConfigCmd contains first-class command for displaying the current server config var GetServerConfigCmd = &cobra.Command{ Use: "get-server-config", Short: "Print the current configuration of the controller", Long: ``, Run: func(cmd *cobra.Command, args []string) { cli := utils.GetClientOutOfCluster() apiExtensionsClientset := utils.GetAPIExtensionsClientOutOfCluster() config, err := utils.GetKubelessConfig(cli, apiExtensionsClientset) if err != nil { logrus.Fatalf("Unable to read the configmap: %v", err) } var lr = 
langruntime.New(config) lr.ReadConfigMap() logrus.Info("Current Server Config:") logrus.Infof("Supported Runtimes are: %s", strings.Join(lr.GetRuntimes(), ", ")) }, } ================================================ FILE: cmd/kubeless/kubeless.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Serverless framework for Kubernetes. package main import ( "os" "github.com/kubeless/kubeless/cmd/kubeless/autoscale" "github.com/kubeless/kubeless/cmd/kubeless/completion" "github.com/kubeless/kubeless/cmd/kubeless/function" "github.com/kubeless/kubeless/cmd/kubeless/getserverconfig" "github.com/kubeless/kubeless/cmd/kubeless/topic" "github.com/kubeless/kubeless/cmd/kubeless/trigger" "github.com/kubeless/kubeless/cmd/kubeless/version" "github.com/spf13/cobra" ) var globalUsage = `` //TODO: add explanation func newRootCmd() *cobra.Command { cmd := &cobra.Command{ Use: "kubeless", Short: "Serverless framework for Kubernetes", Long: globalUsage, } cmd.AddCommand(function.FunctionCmd, topic.TopicCmd, version.VersionCmd, autoscale.AutoscaleCmd, getserverconfig.GetServerConfigCmd, trigger.TriggerCmd, completion.CompletionCmd) return cmd } func main() { cmd := newRootCmd() if err := cmd.Execute(); err != nil { os.Exit(1) } } ================================================ FILE: cmd/kubeless/topic/topic.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 
2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package topic import ( "github.com/spf13/cobra" ) // TopicCmd contains first-class command for topic var TopicCmd = &cobra.Command{ Use: "topic SUBCOMMAND", Short: "manage message topics in Kubeless", Long: `topic command allows user to list, create, delete topics on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } func init() { cmds := []*cobra.Command{topicCreateCmd, topicDeleteCmd, topicListCmd, topicPublishCmd} for _, cmd := range cmds { TopicCmd.AddCommand(cmd) cmd.Flags().StringP("kafka-namespace", "", "kubeless", "Namespace where kafka-controller is deployed. It will default to 'kubeless'") } } ================================================ FILE: cmd/kubeless/topic/topicCreate.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package topic import ( "fmt" "io" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) var topicCreateCmd = &cobra.Command{ Use: "create FLAG", Short: "create a topic to Kubeless", Long: `create a topic to Kubeless`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - topic name") } ctlNamespace, err := cmd.Flags().GetString("kafka-namespace") if err != nil { logrus.Fatal(err) } topicName := args[0] conf, err := utils.BuildOutOfClusterConfig() if err != nil { logrus.Fatal(err) } k8sClientSet := utils.GetClientOutOfCluster() err = createTopic(conf, k8sClientSet, ctlNamespace, topicName, cmd.OutOrStdout()) if err != nil { logrus.Fatal(err) } }, } func createTopic(conf *rest.Config, clientset kubernetes.Interface, ctlNamespace, topicName string, out io.Writer) error { command := []string{ "bash", "/opt/bitnami/kafka/bin/kafka-topics.sh", "--zookeeper", "zookeeper." 
+ ctlNamespace + ":2181", "--replication-factor", "1", "--partitions", "1", "--create", "--topic", topicName, } return execCommand(conf, clientset, ctlNamespace, command, out) } // wrapper of kubectl exec // execCommand executes a command to kafka pod func execCommand(conf *rest.Config, k8sClientSet kubernetes.Interface, ctlNamespace string, command []string, out io.Writer) error { pods, err := utils.GetPodsByLabel(k8sClientSet, ctlNamespace, "kubeless", "kafka") if err != nil { return fmt.Errorf("Can't find the kafka pod: %v", err) } else if len(pods.Items) == 0 { return fmt.Errorf("Can't find any kafka pod") } cmd := utils.Cmd{ Stdout: out, Stderr: out, } rt, err := utils.ExecRoundTripper(conf, cmd.RoundTripCallback) if err != nil { return err } opts := v1.PodExecOptions{ Stdin: false, Stdout: true, Stderr: true, Container: "broker", Command: command, } req, err := utils.Exec(k8sClientSet.Core(), pods.Items[0].Name, ctlNamespace, opts) if err != nil { return err } _, err = rt.RoundTrip(req) return err } ================================================ FILE: cmd/kubeless/topic/topicDelete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package topic import ( "io" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) var topicDeleteCmd = &cobra.Command{ Use: "delete ", Short: "delete a topic from Kubeless", Long: `delete a topic from Kubeless`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - topic name") } ctlNamespace, err := cmd.Flags().GetString("kafka-namespace") if err != nil { logrus.Fatal(err) } topicName := args[0] conf, err := utils.BuildOutOfClusterConfig() if err != nil { logrus.Fatal(err) } k8sClientSet := utils.GetClientOutOfCluster() err = deleteTopic(conf, k8sClientSet, ctlNamespace, topicName, cmd.OutOrStdout()) if err != nil { logrus.Fatal(err) } }, } func deleteTopic(conf *rest.Config, clientset kubernetes.Interface, ctlNamespace, topicName string, out io.Writer) error { command := []string{ "bash", "/opt/bitnami/kafka/bin/kafka-topics.sh", "--zookeeper", "zookeeper." + ctlNamespace + ":2181", "--delete", "--topic", topicName, } return execCommand(conf, clientset, ctlNamespace, command, out) } ================================================ FILE: cmd/kubeless/topic/topicList.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package topic import ( "io" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) var topicListCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all topics created in Kubeless", Long: `list all topics created in Kubeless`, Run: func(cmd *cobra.Command, args []string) { ctlNamespace, err := cmd.Flags().GetString("kafka-namespace") if err != nil { logrus.Fatal(err) } conf, err := utils.BuildOutOfClusterConfig() if err != nil { logrus.Fatal(err) } k8sClientSet := utils.GetClientOutOfCluster() err = listTopic(conf, k8sClientSet, ctlNamespace, cmd.OutOrStdout()) if err != nil { logrus.Fatal(err) } }, } func listTopic(conf *rest.Config, clientset kubernetes.Interface, ctlNamespace string, out io.Writer) error { command := []string{ "bash", "/opt/bitnami/kafka/bin/kafka-topics.sh", "--zookeeper", "zookeeper." + ctlNamespace + ":2181", "--list", } return execCommand(conf, clientset, ctlNamespace, command, out) } ================================================ FILE: cmd/kubeless/topic/topicPublish.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package topic import ( "fmt" "io" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) var topicPublishCmd = &cobra.Command{ Use: "publish FLAG", Short: "publish message to a topic", Long: `publish message to a topic`, Run: func(cmd *cobra.Command, args []string) { data, err := cmd.Flags().GetString("data") if err != nil { logrus.Fatal(err) } topic, err := cmd.Flags().GetString("topic") if err != nil { logrus.Fatal(err) } ctlNamespace, err := cmd.Flags().GetString("kafka-namespace") if err != nil { logrus.Fatal(err) } conf, err := utils.BuildOutOfClusterConfig() if err != nil { logrus.Fatal(err) } k8sClientSet := utils.GetClientOutOfCluster() err = publishTopic(conf, k8sClientSet, ctlNamespace, topic, data, cmd.OutOrStdout()) if err != nil { logrus.Fatal(err) } }, } func publishTopic(conf *rest.Config, clientset kubernetes.Interface, namespace, topic, data string, out io.Writer) error { command := []string{ "bash", "/opt/bitnami/kafka/bin/kafka-console-producer.sh", "--broker-list", "localhost:9092", "--topic", topic, } // Can't just use `execCommand` since we want to specify stdin // TODO(gus): refactor better. 
pods, err := utils.GetPodsByLabel(clientset, namespace, "kubeless", "kafka") if err != nil { return fmt.Errorf("Can't find the kafka pod: %v", err) } else if len(pods.Items) == 0 { return fmt.Errorf("Can't find any kafka pod") } pRead, pWrite := io.Pipe() cmd := utils.Cmd{ Stdin: pRead, Stdout: out, Stderr: out, } rt, err := utils.ExecRoundTripper(conf, cmd.RoundTripCallback) if err != nil { return err } go func() { io.WriteString(pWrite, data+"\n") pWrite.Close() }() opts := v1.PodExecOptions{ Stdin: true, Stdout: true, Stderr: true, TTY: true, Container: "broker", Command: command, } req, err := utils.Exec(clientset.Core(), pods.Items[0].Name, namespace, opts) if err != nil { return err } _, err = rt.RoundTrip(req) return err } func init() { topicPublishCmd.Flags().StringP("data", "", "", "Specify data for function") topicPublishCmd.Flags().StringP("topic", "", "kubeless", "Specify topic name") } ================================================ FILE: cmd/kubeless/trigger/cronjob/create.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package cronjob import ( "fmt" "github.com/robfig/cron" "github.com/sirupsen/logrus" "github.com/spf13/cobra" cronjobApi "github.com/kubeless/cronjob-trigger/pkg/apis/kubeless/v1beta1" cronjobUtils "github.com/kubeless/cronjob-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var createCmd = &cobra.Command{ Use: "create FLAG", Short: "Create a cron job trigger", Long: `Create a cron job trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - cronjob trigger name") } triggerName := args[0] schedule, err := cmd.Flags().GetString("schedule") if err != nil { logrus.Fatal(err) } if _, err := cron.ParseStandard(schedule); err != nil { logrus.Fatalf("Invalid value for --schedule. " + err.Error()) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } functionName, err := cmd.Flags().GetString("function") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } payload, err := cmd.Flags().GetString("payload") if err != nil { logrus.Fatal(err) } payloadFromFile, err := cmd.Flags().GetString("payload-from-file") if err != nil { logrus.Fatal(err) } if len(payload) > 0 && len(payloadFromFile) > 0 { err := "You can't provide both raw payload and a payload file" logrus.Fatal(err) } kubelessClient, err := kubelessUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } cronJobClient, err := cronjobUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } _, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns) if err != nil { logrus.Fatalf("Unable to find Function %s in 
namespace %s. Error %s", functionName, ns, err) } parsedPayload, err := parsePayload(payload, payloadFromFile) if err != nil { logrus.Fatalf("Unable to parse the payload of Function %s in namespace %s. Error %s", functionName, ns, err) } cronJobTrigger := cronjobApi.CronJobTrigger{} cronJobTrigger.TypeMeta = metav1.TypeMeta{ Kind: "CronJobTrigger", APIVersion: "kubeless.io/v1beta1", } cronJobTrigger.ObjectMeta = metav1.ObjectMeta{ Name: triggerName, Namespace: ns, } cronJobTrigger.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", } cronJobTrigger.Spec.FunctionName = functionName cronJobTrigger.Spec.Schedule = schedule cronJobTrigger.Spec.Payload = parsedPayload if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, cronJobTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } err = cronjobUtils.CreateCronJobCustomResource(cronJobClient, &cronJobTrigger) if err != nil { logrus.Fatalf("Failed to create cronjob trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("Cronjob trigger %s created in namespace %s successfully!", triggerName, ns) }, } func init() { createCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the cronjob trigger") createCmd.Flags().StringP("schedule", "", "", "Specify schedule in cron format for scheduled function") createCmd.Flags().StringP("function", "", "", "Name of the function to be associated with trigger") createCmd.MarkFlagRequired("function") createCmd.MarkFlagRequired("schedule") createCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") createCmd.Flags().StringP("output", "o", "yaml", "Output format") createCmd.Flags().StringP("payload", "p", "", "Specify a stringified JSON data to pass to function upon execution") createCmd.Flags().StringP("payload-from-file", "f", "", "Specify a payload file to use. 
It must be a JSON file") } ================================================ FILE: cmd/kubeless/trigger/cronjob/cronjob_trigger.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cronjob import ( "encoding/json" "fmt" "path/filepath" kubelessutil "github.com/kubeless/kubeless/pkg/utils" "github.com/spf13/cobra" ) // CronjobTriggerCmd command for CronJob trigger commands var CronjobTriggerCmd = &cobra.Command{ Use: "cronjob SUBCOMMAND", Short: "cronjob trigger specific operations", Long: `cronjob trigger command allows user to create, list, update, delete cronjob triggers running on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } func init() { CronjobTriggerCmd.AddCommand(createCmd) CronjobTriggerCmd.AddCommand(deleteCmd) CronjobTriggerCmd.AddCommand(listCmd) CronjobTriggerCmd.AddCommand(updateCmd) } func parsePayload(content string, file string) (interface{}, error) { if len(file) > 0 { content, err := getPayloadRawContent(file) if err != nil { return nil, err } return parsePayloadContent(content), nil } return parsePayloadContent(content), nil } func getPayloadRawContent(file string) (string, error) { contentType, err := kubelessutil.GetContentType(file) if err != nil { return "", err } content, _, err := kubelessutil.ParseContent(file, contentType) if err != nil { return "", err } ext := filepath.Ext(file) if ext != ".json" { return "", fmt.Errorf("Sorry, we can't parse %s files yet", 
ext) } return content, nil } func parsePayloadContent(raw string) interface{} { var payload map[string]interface{} err := json.Unmarshal([]byte(raw), &payload) if err != nil { return fmt.Errorf("Found an error during JSON parsing on your payload: %s", err) } return payload } ================================================ FILE: cmd/kubeless/trigger/cronjob/delete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cronjob import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" cronjobUtils "github.com/kubeless/cronjob-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" ) var deleteCmd = &cobra.Command{ Use: "delete ", Short: "delete a cronjob trigger from Kubeless", Long: `delete a cronjob trigger from Kubeless`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - cronjob trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kubelessClient, err := cronjobUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatal(err) } err = cronjobUtils.DeleteCronJobCustomResource(kubelessClient, triggerName, ns) if err != nil { logrus.Fatalf("Failed to delete Cronjob trigger object %s in namespace %s. 
Error: %s", triggerName, ns, err) } logrus.Infof("Cronjob trigger %s deleted from namespace %s successfully!", triggerName, ns) }, } func init() { deleteCmd.Flags().StringP("namespace", "n", "", "Specify namespace of the Cronjob trigger") } ================================================ FILE: cmd/kubeless/trigger/cronjob/list.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cronjob import ( "fmt" "io" "github.com/gosuri/uitable" "github.com/kubeless/cronjob-trigger/pkg/client/clientset/versioned" cronjobUtils "github.com/kubeless/cronjob-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var listCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all Cronjob triggers deployed to Kubeless", Long: `list all Cronjob triggers deployed to Kubeless`, Run: func(cmd *cobra.Command, args []string) { ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err.Error()) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kubelessClient, err := cronjobUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } if err := doList(cmd.OutOrStdout(), kubelessClient, ns); err != nil { logrus.Fatal(err.Error()) } }, } func init() { listCmd.Flags().StringP("namespace", "n", "", "Specify namespace 
for the function") } func doList(w io.Writer, kubelessClient versioned.Interface, ns string) error { triggersList, err := kubelessClient.KubelessV1beta1().CronJobTriggers(ns).List(metav1.ListOptions{}) if err != nil { return err } table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "SCHEDULE", "FUNCTION NAME") for _, trigger := range triggersList.Items { table.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Schedule, trigger.Spec.FunctionName) } fmt.Fprintln(w, table) return nil } ================================================ FILE: cmd/kubeless/trigger/cronjob/update.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cronjob import ( "fmt" "github.com/robfig/cron" "github.com/sirupsen/logrus" "github.com/spf13/cobra" cronjobUtils "github.com/kubeless/cronjob-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" ) var updateCmd = &cobra.Command{ Use: "update FLAG", Short: "Update a cron job trigger", Long: `Update a cron job trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - cronjob trigger name") } triggerName := args[0] schedule, err := cmd.Flags().GetString("schedule") if err != nil { logrus.Fatal(err) } if schedule != "" { if _, err := cron.ParseStandard(schedule); err != nil { logrus.Fatalf("Invalid value for --schedule. 
" + err.Error()) } } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } functionName, err := cmd.Flags().GetString("function") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } payload, err := cmd.Flags().GetString("payload") if err != nil { logrus.Fatal(err) } payloadFromFile, err := cmd.Flags().GetString("payload-from-file") if err != nil { logrus.Fatal(err) } if len(payload) > 0 && len(payloadFromFile) > 0 { err := "You can't provide both raw payload and a payload file" logrus.Fatal(err) } kubelessClient, err := kubelessUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } cronJobClient, err := cronjobUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } _, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns) if err != nil { logrus.Fatalf("Unable to find Function %s in namespace %s. Error %s", triggerName, ns, err) } parsedPayload, err := parsePayload(payload, payloadFromFile) if err != nil { logrus.Fatalf("Unable to parse the payload of Function %s in namespace %s. Error %s", functionName, ns, err) } cronJobTrigger, err := cronjobUtils.GetCronJobCustomResource(cronJobClient, triggerName, ns) if err != nil { logrus.Fatalf("Unable to find Cronjob trigger %s in namespace %s. 
Error %s", triggerName, ns, err) } cronJobTrigger.Spec.FunctionName = functionName cronJobTrigger.Spec.Schedule = schedule cronJobTrigger.Spec.Payload = parsedPayload if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, cronJobTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } err = cronjobUtils.UpdateCronJobCustomResource(cronJobClient, cronJobTrigger) if err != nil { logrus.Fatalf("Failed to update cronjob trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("Cronjob trigger %s updated in namespace %s successfully!", triggerName, ns) }, } func init() { updateCmd.Flags().StringP("namespace", "n", "", "Specify namespace of the cronjob trigger") updateCmd.Flags().StringP("schedule", "", "", "Specify schedule in cron format for scheduled function") updateCmd.Flags().StringP("function", "", "", "Name of the function to be associated with trigger") updateCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") updateCmd.Flags().StringP("output", "o", "yaml", "Output format") updateCmd.Flags().StringP("payload", "p", "", "Specify a stringified JSON data to pass to function upon execution") updateCmd.Flags().StringP("payload-from-file", "f", "", "Specify a payload file to use. It must be a JSON file") } ================================================ FILE: cmd/kubeless/trigger/http/create.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package http import ( "fmt" httpApi "github.com/kubeless/http-trigger/pkg/apis/kubeless/v1beta1" httpUtils "github.com/kubeless/http-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var createCmd = &cobra.Command{ Use: "create FLAG", Short: "Create a http trigger", Long: `Create a http trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - http trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } path, err := cmd.Flags().GetString("path") if err != nil { logrus.Fatal(err) } functionName, err := cmd.Flags().GetString("function-name") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } kubelessClient, err := kubelessUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } _, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns) if err != nil { logrus.Fatalf("Unable to find Function %s in namespace %s. 
Error %s", functionName, ns, err) } httpTrigger := httpApi.HTTPTrigger{} httpTrigger.TypeMeta = metav1.TypeMeta{ Kind: "HTTPTrigger", APIVersion: "kubeless.io/v1beta1", } httpTrigger.ObjectMeta = metav1.ObjectMeta{ Name: triggerName, Namespace: ns, } httpTrigger.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", } httpTrigger.Spec.FunctionName = functionName if len(path) != 0 { httpTrigger.Spec.Path = path } enableTLSAcme, err := cmd.Flags().GetBool("enableTLSAcme") if err != nil { logrus.Fatal(err) } httpTrigger.Spec.TLSAcme = enableTLSAcme corsEnabled, err := cmd.Flags().GetBool("cors-enable") if err != nil { logrus.Fatal(err) } httpTrigger.Spec.CorsEnable = corsEnabled tlsSecret, err := cmd.Flags().GetString("tls-secret") if err != nil { logrus.Fatal(err) } if enableTLSAcme && len(tlsSecret) > 0 { logrus.Fatalf("Cannot specify both --enableTLSAcme and --tls-secret") } httpTrigger.Spec.TLSSecret = tlsSecret gateway, err := cmd.Flags().GetString("gateway") if err != nil { logrus.Fatal(err) } if gateway != "nginx" && gateway != "traefik" && gateway != "kong" { logrus.Fatalf("Unsupported gateway %s", gateway) } httpTrigger.Spec.Gateway = gateway hostName, err := cmd.Flags().GetString("hostname") if err != nil { logrus.Fatal(err) } if hostName == "" && gateway == "nginx" { // We assume that Nginx will be listening in the port 80 // of the cluster plublic IP config, err := kubelessUtils.BuildOutOfClusterConfig() if err != nil { logrus.Fatal(err) } hostName, err = httpUtils.GetLocalHostname(config, functionName) if err != nil { logrus.Fatal(err) } } if hostName == "" { logrus.Fatalf("The --hostname flag is required") } httpTrigger.Spec.HostName = hostName basicAuthSecret, err := cmd.Flags().GetString("basic-auth-secret") if err != nil { logrus.Fatal(err) } httpTrigger.Spec.BasicAuthSecret = basicAuthSecret if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, httpTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } httpClient, 
err := httpUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } err = httpUtils.CreateHTTPTriggerCustomResource(httpClient, &httpTrigger) if err != nil { logrus.Fatalf("Failed to deploy HTTP trigger %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("HTTP trigger %s created in namespace %s successfully!", triggerName, ns) }, } func init() { createCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the HTTP trigger") createCmd.Flags().StringP("function-name", "", "", "Name of the function to be associated with trigger") createCmd.Flags().StringP("path", "", "", "Ingress path for the function") createCmd.Flags().StringP("hostname", "", "", "Specify a valid hostname for the function") createCmd.Flags().BoolP("enableTLSAcme", "", false, "If true, routing rule will be configured for use with kube-lego") createCmd.Flags().StringP("gateway", "", "nginx", "Specify a valid gateway for the Ingress. Supported: nginx, traefik, kong") createCmd.Flags().StringP("basic-auth-secret", "", "", "Specify an existing secret name for basic authentication") createCmd.Flags().StringP("tls-secret", "", "", "Specify an existing secret that contains a TLS private key and certificate to secure ingress") createCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") createCmd.Flags().StringP("output", "o", "yaml", "Output format") createCmd.Flags().BoolP("cors-enable", "", false, "If true then cors will be enabled on Http Trigger") createCmd.MarkFlagRequired("function-name") } ================================================ FILE: cmd/kubeless/trigger/http/delete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package http import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" httpUtils "github.com/kubeless/http-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" ) var deleteCmd = &cobra.Command{ Use: "delete ", Short: "Delete a HTTP trigger", Long: `Delete a HTTP trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - Kafka trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } httpClient, err := httpUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } err = httpUtils.DeleteHTTPTriggerCustomResource(httpClient, triggerName, ns) if err != nil { logrus.Fatalf("Failed to delete HTTP trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("HTTP trigger %s deleted from namespace %s successfully!", triggerName, ns) }, } func init() { deleteCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } ================================================ FILE: cmd/kubeless/trigger/http/http_trigger.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package http import ( "github.com/spf13/cobra" ) // HTTPTriggerCmd command for http trigger commands var HTTPTriggerCmd = &cobra.Command{ Use: "http SUBCOMMAND", Short: "http trigger specific operations", Long: `http trigger command allows user to create, list, update, delete http triggers running on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } func init() { HTTPTriggerCmd.AddCommand(createCmd) HTTPTriggerCmd.AddCommand(deleteCmd) HTTPTriggerCmd.AddCommand(listCmd) HTTPTriggerCmd.AddCommand(updateCmd) } ================================================ FILE: cmd/kubeless/trigger/http/list.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package http import ( "fmt" "io" "github.com/gosuri/uitable" "github.com/kubeless/http-trigger/pkg/client/clientset/versioned" httpUtils "github.com/kubeless/http-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var listCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all HTTP triggers deployed to Kubeless", Long: `list all HTTP triggers deployed to Kubeless`, Run: func(cmd *cobra.Command, args []string) { ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err.Error()) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } httpClient, err := httpUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } if err := doList(cmd.OutOrStdout(), httpClient, ns); err != nil { logrus.Fatal(err.Error()) } }, } func init() { listCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } func doList(w io.Writer, kubelessClient versioned.Interface, ns string) error { triggersList, err := kubelessClient.KubelessV1beta1().HTTPTriggers(ns).List(metav1.ListOptions{}) if err != nil { return err } table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "FUNCTION NAME") for _, trigger := range triggersList.Items { table.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.FunctionName) } fmt.Fprintln(w, table) return nil } ================================================ FILE: cmd/kubeless/trigger/http/update.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package http import ( "fmt" httpUtils "github.com/kubeless/http-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var updateCmd = &cobra.Command{ Use: "update FLAG", Short: "Update a http trigger", Long: `Update a http trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - http trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kubelessClient, err := kubelessUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } httpClient, err := httpUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } httpTrigger, err := httpUtils.GetHTTPTriggerCustomResource(httpClient, triggerName, ns) if err != nil { logrus.Fatalf("Unable to find HTTP trigger %s in namespace %s. Error %s", triggerName, ns, err) } functionName, err := cmd.Flags().GetString("function-name") if err != nil { logrus.Fatal(err) } if functionName != "" { _, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns) if err != nil { logrus.Fatalf("Unable to find Function %s in namespace %s. 
Error %s", functionName, ns, err) } httpTrigger.Spec.FunctionName = functionName } path, err := cmd.Flags().GetString("path") if err != nil { logrus.Fatal(err) } if path != "" { httpTrigger.Spec.Path = path } hostName, err := cmd.Flags().GetString("hostname") if err != nil { logrus.Fatal(err) } if hostName != "" { httpTrigger.Spec.HostName = hostName } enableTLSAcme, err := cmd.Flags().GetBool("enableTLSAcme") if err != nil { logrus.Fatal(err) } httpTrigger.Spec.TLSAcme = enableTLSAcme tlsSecret, err := cmd.Flags().GetString("tls-secret") if err != nil { logrus.Fatal(err) } if enableTLSAcme && len(tlsSecret) > 0 { logrus.Fatalf("Cannot specify both --enableTLSAcme and --tls-secret") } if tlsSecret != "" { httpTrigger.Spec.TLSSecret = tlsSecret } gateway, err := cmd.Flags().GetString("gateway") if err != nil { logrus.Fatal(err) } if gateway != "" { httpTrigger.Spec.Gateway = gateway } basicAuthSecret, err := cmd.Flags().GetString("basic-auth-secret") if err != nil { logrus.Fatal(err) } if basicAuthSecret != "" { httpTrigger.Spec.BasicAuthSecret = basicAuthSecret } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, httpTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } err = httpUtils.UpdateHTTPTriggerCustomResource(httpClient, httpTrigger) if err != nil { logrus.Fatalf("Failed to deploy HTTP trigger %s in namespace %s. 
Error: %s", triggerName, ns, err) } logrus.Infof("HTTP trigger %s updated in namespace %s successfully!", triggerName, ns) }, } func init() { updateCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the HTTP trigger") updateCmd.Flags().StringP("function-name", "", "", "Name of the function to be associated with trigger") updateCmd.Flags().StringP("path", "", "", "Ingress path for the function") updateCmd.Flags().StringP("hostname", "", "", "Specify a valid hostname for the function") updateCmd.Flags().BoolP("enableTLSAcme", "", false, "If true, routing rule will be configured for use with kube-lego") updateCmd.Flags().StringP("gateway", "", "", "Specify a valid gateway for the Ingress") updateCmd.Flags().StringP("basic-auth-secret", "", "", "Specify an existing secret name for basic authentication") updateCmd.Flags().StringP("tls-secret", "", "", "Specify an existing secret that contains a TLS private key and certificate to secure ingress") updateCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") updateCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/trigger/kafka/create.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kafka import ( "fmt" "github.com/sirupsen/logrus" "github.com/spf13/cobra" kafkaApi "github.com/kubeless/kafka-trigger/pkg/apis/kubeless/v1beta1" kafkaUtils "github.com/kubeless/kafka-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var createCmd = &cobra.Command{ Use: "create FLAG", Short: "Create a Kafka trigger", Long: `Create a Kafka trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - kafka trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } topic, err := cmd.Flags().GetString("trigger-topic") if err != nil { logrus.Fatal(err) } functionSelector, err := cmd.Flags().GetString("function-selector") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } labelSelector, err := metav1.ParseToLabelSelector(functionSelector) if err != nil { logrus.Fatal("Invalid lable selector specified " + err.Error()) } kafkaTrigger := kafkaApi.KafkaTrigger{} kafkaTrigger.TypeMeta = metav1.TypeMeta{ Kind: "KafkaTrigger", APIVersion: "kubeless.io/v1beta1", } kafkaTrigger.ObjectMeta = metav1.ObjectMeta{ Name: triggerName, Namespace: ns, } kafkaTrigger.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", } kafkaTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels kafkaTrigger.Spec.Topic = topic if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, kafkaTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } kafkaClient, err := kafkaUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } err = kafkaUtils.CreateKafkaTriggerCustomResource(kafkaClient, 
&kafkaTrigger) if err != nil { logrus.Fatalf("Failed to create Kafka trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("Kafka trigger %s created in namespace %s successfully!", triggerName, ns) }, } func init() { createCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the kafka trigger") createCmd.Flags().StringP("trigger-topic", "", "", "Specify topic to listen to in Kafka broker") createCmd.Flags().StringP("function-selector", "", "", "Selector (label query) to select function on (e.g. --function-selector key1=value1,key2=value2)") createCmd.MarkFlagRequired("trigger-topic") createCmd.MarkFlagRequired("function-selector") createCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") createCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/trigger/kafka/delete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kafka import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" kafkaUtils "github.com/kubeless/kafka-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" ) var deleteCmd = &cobra.Command{ Use: "delete ", Short: "Delete a Kafka trigger", Long: `Delete a Kafka trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - Kafka trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kafkaClient, err := kafkaUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } err = kafkaUtils.DeleteKafkaTriggerCustomResource(kafkaClient, triggerName, ns) if err != nil { logrus.Fatalf("Failed to delete Kafka trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("Kafka trigger %s deleted from namespace %s successfully!", triggerName, ns) }, } func init() { deleteCmd.Flags().StringP("namespace", "n", "", "Specify namespace of the Kafka trigger") } ================================================ FILE: cmd/kubeless/trigger/kafka/kafka_trigger.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kafka import ( "github.com/spf13/cobra" ) // KafkaTriggerCmd command for Kafka trigger commands var KafkaTriggerCmd = &cobra.Command{ Use: "kafka SUBCOMMAND", Short: "kafka trigger specific operations", Long: `kafka trigger command allows user to create, list, update, delete kafka triggers running on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } func init() { KafkaTriggerCmd.AddCommand(createCmd) KafkaTriggerCmd.AddCommand(deleteCmd) KafkaTriggerCmd.AddCommand(listCmd) KafkaTriggerCmd.AddCommand(updateCmd) } ================================================ FILE: cmd/kubeless/trigger/kafka/list.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kafka import ( "fmt" "io" "github.com/gosuri/uitable" "github.com/kubeless/kafka-trigger/pkg/client/clientset/versioned" kafkaUtils "github.com/kubeless/kafka-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var listCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all Kafka triggers deployed to Kubeless", Long: `list all Kafka triggers deployed to Kubeless`, Run: func(cmd *cobra.Command, args []string) { ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err.Error()) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kafkaClient, err := kafkaUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } if err := doList(cmd.OutOrStdout(), kafkaClient, ns); err != nil { logrus.Fatal(err.Error()) } }, } func init() { listCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") } func doList(w io.Writer, kubelessClient versioned.Interface, ns string) error { triggersList, err := kubelessClient.KubelessV1beta1().KafkaTriggers(ns).List(metav1.ListOptions{}) if err != nil { return err } table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "TOPIC", "FUNCTION SELECTOR") for _, trigger := range triggersList.Items { table.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Topic, metav1.FormatLabelSelector(&trigger.Spec.FunctionSelector)) } fmt.Fprintln(w, table) return nil } ================================================ FILE: cmd/kubeless/trigger/kafka/update.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kafka import ( "fmt" "github.com/sirupsen/logrus" "github.com/spf13/cobra" kafkaUtils "github.com/kubeless/kafka-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var updateCmd = &cobra.Command{ Use: "update FLAG", Short: "Update a Kafka trigger", Long: `Update a Kafka trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - kafka trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kafkaClient, err := kafkaUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } kafkaTrigger, err := kafkaUtils.GetKafkaTriggerCustomResource(kafkaClient, triggerName, ns) if err != nil { logrus.Fatalf("Unable to find Kafka trigger %s in namespace %s. 
Error %s", triggerName, ns, err) } topic, err := cmd.Flags().GetString("trigger-topic") if err != nil { logrus.Fatal(err) } if topic != "" { kafkaTrigger.Spec.Topic = topic } functionSelector, err := cmd.Flags().GetString("function-selector") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } if functionSelector != "" { labelSelector, err := metav1.ParseToLabelSelector(functionSelector) if err != nil { logrus.Fatal("Invalid lable selector specified " + err.Error()) } kafkaTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels } if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, kafkaTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } err = kafkaUtils.UpdateKafkaTriggerCustomResource(kafkaClient, kafkaTrigger) if err != nil { logrus.Fatalf("Failed to update Kafka trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("Kafka trigger %s updated in namespace %s successfully!", triggerName, ns) }, } func init() { updateCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the function") updateCmd.Flags().StringP("trigger-topic", "", "", "Specify topic to listen to in Kafka broker") updateCmd.Flags().StringP("function-selector", "", "", "Selector (label query) to select function on (e.g. --function-selector key1=value1,key2=value2)") updateCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") updateCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/trigger/kinesis/create.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kinesis import ( "fmt" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "net/url" kinesisApi "github.com/kubeless/kinesis-trigger/pkg/apis/kubeless/v1beta1" kinesisUtils "github.com/kubeless/kinesis-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var createCmd = &cobra.Command{ Use: "create FLAG", Short: "Create a Kinesis trigger", Long: `Create a Kinesis trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - Kinesis trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } functionName, err := cmd.Flags().GetString("function-name") if err != nil { logrus.Fatal(err) } kubelessClient, err := kubelessUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } _, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns) if err != nil { logrus.Fatalf("Unable to find Function %s in namespace %s. 
Error %s", functionName, ns, err) } streamName, err := cmd.Flags().GetString("stream") if err != nil { logrus.Fatal(err) } regionName, err := cmd.Flags().GetString("aws-region") if err != nil { logrus.Fatal(err) } shardID, err := cmd.Flags().GetString("shard-id") if err != nil { logrus.Fatal(err) } secretName, err := cmd.Flags().GetString("secret") if err != nil { logrus.Fatal(err) } endpointURL, err := cmd.Flags().GetString("endpoint") if err != nil { logrus.Fatal(err) } if len(endpointURL) > 0 { _, err = url.ParseRequestURI(endpointURL) if err != nil { panic(err) } } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } cli := kubelessUtils.GetClientOutOfCluster() _, err = cli.Core().Secrets(ns).Get(secretName, metav1.GetOptions{}) if err != nil { logrus.Fatal(err) } kinesisTrigger := kinesisApi.KinesisTrigger{} kinesisTrigger.TypeMeta = metav1.TypeMeta{ Kind: "KinesisTrigger", APIVersion: "kubeless.io/v1beta1", } kinesisTrigger.ObjectMeta = metav1.ObjectMeta{ Name: triggerName, Namespace: ns, } kinesisTrigger.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", } kinesisTrigger.Spec.FunctionName = functionName kinesisTrigger.Spec.Region = regionName kinesisTrigger.Spec.Stream = streamName kinesisTrigger.Spec.ShardID = shardID kinesisTrigger.Spec.Secret = secretName kinesisTrigger.Spec.Endpoint = endpointURL if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, kinesisTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } kinesisClient, err := kinesisUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } err = kinesisUtils.CreateKinesisTriggerCustomResource(kinesisClient, &kinesisTrigger) if err != nil { logrus.Fatalf("Failed to create Kinesis trigger object %s in namespace %s. 
Error: %s", triggerName, ns, err) } logrus.Infof("Kinesis trigger %s created in namespace %s successfully!", triggerName, ns) }, } func init() { createCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the Kinesis trigger") createCmd.Flags().StringP("stream", "", "", "Name of the AWS Kinesis stream") createCmd.Flags().StringP("aws-region", "", "", "AWS region in which stream is available") createCmd.Flags().StringP("shard-id", "", "", "Shard-ID of the AWS kinesis stream") createCmd.Flags().StringP("function-name", "", "", "Name of the Kubeless function to be associated with AWS Kinesis stream") createCmd.Flags().StringP("secret", "", "", "Kubernetes secret that has AWS access key and secret key") createCmd.Flags().StringP("endpoint", "", "", "Override AWS's default service URL with the given URL") createCmd.MarkFlagRequired("stream") createCmd.MarkFlagRequired("aws-region") createCmd.MarkFlagRequired("shard-id") createCmd.MarkFlagRequired("function-name") createCmd.MarkFlagRequired("secret") createCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") createCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/trigger/kinesis/delete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kinesis import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" kinesisUtils "github.com/kubeless/kinesis-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" ) var deleteCmd = &cobra.Command{ Use: "delete ", Short: "Delete a Kinesis trigger", Long: `Delete a Kinesis trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - Kinesis trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kinesisClient, err := kinesisUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } err = kinesisUtils.DeleteKinesisTriggerCustomResource(kinesisClient, triggerName, ns) if err != nil { logrus.Fatalf("Failed to delete Kinesis trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("Kinesis trigger %s deleted from namespace %s successfully!", triggerName, ns) }, } func init() { deleteCmd.Flags().StringP("namespace", "n", "", "Specify namespace of the Kinesis trigger") } ================================================ FILE: cmd/kubeless/trigger/kinesis/kinesis_trigger.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kinesis import ( "github.com/spf13/cobra" ) // KinesisTriggerCmd command for Kinesis trigger commands var KinesisTriggerCmd = &cobra.Command{ Use: "kinesis SUBCOMMAND", Short: "kinesis trigger specific operations", Long: `kinesis trigger command allows users to create, list, update, delete Kinesis triggers running on Kubeless`, Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, } func init() { KinesisTriggerCmd.AddCommand(createCmd) KinesisTriggerCmd.AddCommand(deleteCmd) KinesisTriggerCmd.AddCommand(listCmd) KinesisTriggerCmd.AddCommand(updateCmd) KinesisTriggerCmd.AddCommand(publishCmd) KinesisTriggerCmd.AddCommand(createStreamCmd) } ================================================ FILE: cmd/kubeless/trigger/kinesis/list.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kinesis import ( "fmt" "io" "github.com/gosuri/uitable" "github.com/kubeless/kinesis-trigger/pkg/client/clientset/versioned" kinesisUtils "github.com/kubeless/kinesis-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var listCmd = &cobra.Command{ Use: "list FLAG", Aliases: []string{"ls"}, Short: "list all Kinesis triggers deployed to Kubeless", Long: `list all Kinesis triggers deployed to Kubeless`, Run: func(cmd *cobra.Command, args []string) { ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err.Error()) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kinesisClient, err := kinesisUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } if err := doList(cmd.OutOrStdout(), kinesisClient, ns); err != nil { logrus.Fatal(err.Error()) } }, } func init() { listCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the NATS trigger") } func doList(w io.Writer, kubelessClient versioned.Interface, ns string) error { triggersList, err := kubelessClient.KubelessV1beta1().KinesisTriggers(ns).List(metav1.ListOptions{}) if err != nil { return err } table := uitable.New() table.MaxColWidth = 50 table.Wrap = true table.AddRow("NAME", "NAMESPACE", "REGION", "STREAM", "SHARD", "FUNCTION NAME") for _, trigger := range triggersList.Items { table.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Region, trigger.Spec.Stream, trigger.Spec.ShardID, trigger.Spec.FunctionName) } fmt.Fprintln(w, table) return nil } ================================================ FILE: cmd/kubeless/trigger/kinesis/publish.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kinesis import ( "net/url" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/kubeless/kubeless/pkg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var publishCmd = &cobra.Command{ Use: "publish FLAG", Short: "publish message to a Kinesis stream", Long: `publish message to a Kinesis stream`, Run: func(cmd *cobra.Command, args []string) { records, err := cmd.Flags().GetStringArray("records") if err != nil { logrus.Fatal(err) } streamName, err := cmd.Flags().GetString("stream") if err != nil { logrus.Fatal(err) } region, err := cmd.Flags().GetString("aws-region") if err != nil { logrus.Fatal(err) } key, err := cmd.Flags().GetString("partition-key") if err != nil { logrus.Fatal(err) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } secretName, err := cmd.Flags().GetString("secret") if err != nil { logrus.Fatal(err) } client := utils.GetClientOutOfCluster() secret, err := client.Core().Secrets(ns).Get(secretName, metav1.GetOptions{}) if err != nil { logrus.Errorf("Error getting secret: %s necessary to connect to AWS services. Erro: %v", secretName, err) } if _, ok := secret.Data["aws_access_key_id"]; !ok { logrus.Fatalf("Error getting aws_access_key_id from the secret: %s necessary to connect to AWS Kinesis service. 
Error: %v", secretName, err) } if _, ok := secret.Data["aws_secret_access_key"]; !ok { logrus.Fatalf("Error getting aws_aaws_secret_access_keyccess_key_id from the secret: %s necessary to connect to AWS Kinesis service. Error: %v", secretName, err) } awsAccessKey := string(secret.Data["aws_access_key_id"][:]) awsSecretAccessKey := string(secret.Data["aws_secret_access_key"][:]) endpointURL, err := cmd.Flags().GetString("endpoint") if err != nil { logrus.Fatal(err) } if len(endpointURL) > 0 { _, err = url.ParseRequestURI(endpointURL) if err != nil { panic(err) } } customCreds := credentials.NewStaticCredentials(awsAccessKey, awsSecretAccessKey, "") var s *session.Session if len(endpointURL) > 0 { s = session.New(&aws.Config{Region: aws.String(region), Endpoint: aws.String(endpointURL), Credentials: customCreds}) } else { s = session.New(&aws.Config{Region: aws.String(region), Credentials: customCreds}) } kc := kinesis.New(s) entries := make([]*kinesis.PutRecordsRequestEntry, len(records)) for i, record := range records { entries[i] = &kinesis.PutRecordsRequestEntry{ Data: []byte(record), PartitionKey: aws.String(key), } } _, err = kc.PutRecords(&kinesis.PutRecordsInput{ Records: entries, StreamName: aws.String(streamName), }) if err != nil { panic("Failed to put to record in the stream. 
Error: " + err.Error()) } }, } func init() { var records []string publishCmd.Flags().StringP("stream", "", "", "Name of the AWS Kinesis stream") publishCmd.Flags().StringP("aws-region", "", "", "AWS region in which stream is available") publishCmd.Flags().StringP("partition-key", "", "", "partiion key to use put message in AWS kinesis stream") publishCmd.Flags().StringArray("records", records, "Specify list of records to be published to the stream") publishCmd.Flags().StringP("endpoint", "", "", "Override AWS's default service URL with the given URL") publishCmd.Flags().StringP("secret", "", "", "Kubernetes secret that has AWS access key and secret key") publishCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the Kinesis trigger") publishCmd.MarkFlagRequired("stream") publishCmd.MarkFlagRequired("aws-region") publishCmd.MarkFlagRequired("partition-key") publishCmd.MarkFlagRequired("message") publishCmd.MarkFlagRequired("aws_access_key_id") publishCmd.MarkFlagRequired("aws_secret_access_key") publishCmd.MarkFlagRequired("records") publishCmd.MarkFlagRequired("secret") } ================================================ FILE: cmd/kubeless/trigger/kinesis/stream_create.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package kinesis import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" "net/url" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/kubeless/kubeless/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var createStreamCmd = &cobra.Command{ Use: "create-stream FLAG", Short: "Create a Kinesis stream", Long: `Create a Kinesis stream. Provide only for convenience/quick testing in Kubeless cli`, Run: func(cmd *cobra.Command, args []string) { regionName, err := cmd.Flags().GetString("aws-region") if err != nil { logrus.Fatal(err) } shardCount, err := cmd.Flags().GetInt64("shard-count") if err != nil { logrus.Fatal(err) } endpointURL, err := cmd.Flags().GetString("endpoint") if err != nil { logrus.Fatal(err) } _, err = url.ParseRequestURI(endpointURL) if err != nil { panic(err) } ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = utils.GetDefaultNamespace() } secretName, err := cmd.Flags().GetString("secret") if err != nil { logrus.Fatal(err) } client := utils.GetClientOutOfCluster() secret, err := client.Core().Secrets(ns).Get(secretName, metav1.GetOptions{}) if err != nil { logrus.Errorf("Error getting secret: %s necessary to connect to AWS services. Erro: %v", secretName, err) } if _, ok := secret.Data["aws_access_key_id"]; !ok { logrus.Fatalf("Error getting aws_access_key_id from the secret: %s necessary to connect to AWS Kinesis service. Error: %v", secretName, err) } if _, ok := secret.Data["aws_secret_access_key"]; !ok { logrus.Fatalf("Error getting aws_aaws_secret_access_keyccess_key_id from the secret: %s necessary to connect to AWS Kinesis service. 
Error: %v", secretName, err) } awsAccessKey := string(secret.Data["aws_access_key_id"][:]) awsSecretAccessKey := string(secret.Data["aws_secret_access_key"][:]) streamName, err := cmd.Flags().GetString("stream-name") customCreds := credentials.NewStaticCredentials(awsAccessKey, awsSecretAccessKey, "") var s *session.Session if len(endpointURL) > 0 { s = session.New(&aws.Config{Region: aws.String(regionName), Endpoint: aws.String(endpointURL), Credentials: customCreds}) } else { s = session.New(&aws.Config{Region: aws.String(regionName), Credentials: customCreds}) } kc := kinesis.New(s) _, err = kc.CreateStream(&kinesis.CreateStreamInput{ShardCount: &shardCount, StreamName: &streamName}) if err != nil { logrus.Fatalf("Failed to create Kinesis stream. Error: %v", err) } }, } func init() { createStreamCmd.Flags().StringP("stream-name", "", "", "A name to identify the stream.") createStreamCmd.Flags().StringP("aws-region", "", "", "AWS region in which stream is to be created.") createStreamCmd.Flags().Int64("shard-count", 1, "The number of shards that the stream will use.") createStreamCmd.Flags().StringP("endpoint", "", "", "Override AWS's default service URL with the given URL") createStreamCmd.Flags().StringP("secret", "", "", "Kubernetes secret that has AWS access key and secret key") createStreamCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the Kinesis trigger") createStreamCmd.MarkFlagRequired("stream-name") createStreamCmd.MarkFlagRequired("aws-region") createStreamCmd.MarkFlagRequired("aws_access_key_id") createStreamCmd.MarkFlagRequired("aws_secret_access_key") createStreamCmd.MarkFlagRequired("secret") } ================================================ FILE: cmd/kubeless/trigger/kinesis/update.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kinesis import ( "fmt" "github.com/sirupsen/logrus" "github.com/spf13/cobra" kinesisUtils "github.com/kubeless/kinesis-trigger/pkg/utils" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" ) var updateCmd = &cobra.Command{ Use: "update FLAG", Short: "Update a Kinesis trigger", Long: `Update a Kinesis trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - Kinesis trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } kubelessClient, err := kubelessUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } kinesisClient, err := kinesisUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } kinesisTrigger, err := kinesisUtils.GetKinesisTriggerCustomResource(kinesisClient, triggerName, ns) if err != nil { logrus.Fatalf("Unable to find Kinesis trigger %s in namespace %s. 
Error %s", triggerName, ns, err) } streamName, err := cmd.Flags().GetString("stream") if err != nil { logrus.Fatal(err) } regionName, err := cmd.Flags().GetString("aws-region") if err != nil { logrus.Fatal(err) } shardID, err := cmd.Flags().GetString("shard-id") if err != nil { logrus.Fatal(err) } secretName, err := cmd.Flags().GetString("secret") if err != nil { logrus.Fatal(err) } functionName, err := cmd.Flags().GetString("function-name") if err != nil { logrus.Fatal(err) } if functionName != "" { _, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns) if err != nil { logrus.Fatalf("Unable to find Function %s in namespace %s. Error %s", functionName, ns, err) } } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } if regionName != "" { kinesisTrigger.Spec.Region = regionName } if secretName != "" { kinesisTrigger.Spec.Secret = secretName } if shardID != "" { kinesisTrigger.Spec.ShardID = shardID } if streamName != "" { kinesisTrigger.Spec.Stream = streamName } if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, kinesisTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } err = kinesisUtils.UpdateKinesisTriggerCustomResource(kinesisClient, kinesisTrigger) if err != nil { logrus.Fatalf("Failed to update Kinesis trigger object %s in namespace %s. 
Error: %s", triggerName, ns, err) } logrus.Infof("Kinesis trigger %s updated in namespace %s successfully!", triggerName, ns) }, } func init() { updateCmd.Flags().StringP("stream", "", "", "Name of the AWS Kinesis stream") updateCmd.Flags().StringP("aws-region", "", "", "AWS region in which stream is available") updateCmd.Flags().StringP("shard-id", "", "", "Shard-ID of the AWS kinesis stream") updateCmd.Flags().StringP("function-name", "", "", "Name of the Kubeless function to be associated with AWS Kinesis stream") updateCmd.Flags().StringP("secret", "", "", "Kubernetes secret that has AWS access key and secret key") updateCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") updateCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/trigger/nats/create.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package nats import ( "fmt" "github.com/sirupsen/logrus" "github.com/spf13/cobra" kubelessUtils "github.com/kubeless/kubeless/pkg/utils" natsApi "github.com/kubeless/nats-trigger/pkg/apis/kubeless/v1beta1" natsUtils "github.com/kubeless/nats-trigger/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var createCmd = &cobra.Command{ Use: "create FLAG", Short: "Create a NATS trigger", Long: `Create a NATS trigger`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { logrus.Fatal("Need exactly one argument - NATS trigger name") } triggerName := args[0] ns, err := cmd.Flags().GetString("namespace") if err != nil { logrus.Fatal(err) } if ns == "" { ns = kubelessUtils.GetDefaultNamespace() } topic, err := cmd.Flags().GetString("trigger-topic") if err != nil { logrus.Fatal(err) } functionSelector, err := cmd.Flags().GetString("function-selector") if err != nil { logrus.Fatal(err) } dryrun, err := cmd.Flags().GetBool("dryrun") if err != nil { logrus.Fatal(err) } output, err := cmd.Flags().GetString("output") if err != nil { logrus.Fatal(err) } labelSelector, err := metav1.ParseToLabelSelector(functionSelector) if err != nil { logrus.Fatal("Invalid label selector specified " + err.Error()) } natsClient, err := natsUtils.GetKubelessClientOutCluster() if err != nil { logrus.Fatalf("Can not create out-of-cluster client: %v", err) } natsTrigger := natsApi.NATSTrigger{} natsTrigger.TypeMeta = metav1.TypeMeta{ Kind: "NATSTrigger", APIVersion: "kubeless.io/v1beta1", } natsTrigger.ObjectMeta = metav1.ObjectMeta{ Name: triggerName, Namespace: ns, } natsTrigger.ObjectMeta.Labels = map[string]string{ "created-by": "kubeless", } natsTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels natsTrigger.Spec.Topic = topic if dryrun == true { res, err := kubelessUtils.DryRunFmt(output, natsTrigger) if err != nil { logrus.Fatal(err) } fmt.Println(res) return } err = natsUtils.CreateNatsTriggerCustomResource(natsClient, &natsTrigger) if err != nil { 
logrus.Fatalf("Failed to create NATS trigger object %s in namespace %s. Error: %s", triggerName, ns, err) } logrus.Infof("NATS trigger %s created in namespace %s successfully!", triggerName, ns) }, } func init() { createCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the NATS trigger") createCmd.Flags().StringP("trigger-topic", "", "", "Specify topic to listen to in NATS") createCmd.Flags().StringP("function-selector", "", "", "Selector (label query) to select function on (e.g. --function-selector key1=value1,key2=value2)") createCmd.MarkFlagRequired("trigger-topic") createCmd.MarkFlagRequired("function-selector") createCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it") createCmd.Flags().StringP("output", "o", "yaml", "Output format") } ================================================ FILE: cmd/kubeless/trigger/nats/delete.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

package nats

import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

	kubelessUtils "github.com/kubeless/kubeless/pkg/utils"
	natsUtils "github.com/kubeless/nats-trigger/pkg/utils"
)

// deleteCmd implements `kubeless trigger nats delete`: it removes the
// NATSTrigger custom resource named by the single positional argument.
var deleteCmd = &cobra.Command{
	Use:   "delete ",
	Short: "Delete a NATS trigger",
	Long:  `Delete a NATS trigger`,
	Run: func(cmd *cobra.Command, args []string) {
		// Exactly one positional argument is expected: the trigger name.
		if len(args) != 1 {
			logrus.Fatal("Need exactly one argument - NATS trigger name")
		}
		triggerName := args[0]
		ns, err := cmd.Flags().GetString("namespace")
		if err != nil {
			logrus.Fatal(err)
		}
		// Fall back to the kubeconfig's current namespace when --namespace
		// was not given.
		if ns == "" {
			ns = kubelessUtils.GetDefaultNamespace()
		}
		natsClient, err := natsUtils.GetKubelessClientOutCluster()
		if err != nil {
			logrus.Fatalf("Can not create out-of-cluster client: %v", err)
		}
		err = natsUtils.DeleteNatsTriggerCustomResource(natsClient, triggerName, ns)
		if err != nil {
			logrus.Fatalf("Failed to delete NATS trigger object %s in namespace %s. Error: %s", triggerName, ns, err)
		}
		logrus.Infof("NATS trigger %s deleted from namespace %s successfully!", triggerName, ns)
	},
}

func init() {
	deleteCmd.Flags().StringP("namespace", "n", "", "Specify namespace of the NATS trigger")
}

================================================ FILE: cmd/kubeless/trigger/nats/list.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

package nats

import (
	"fmt"
	"io"

	"github.com/gosuri/uitable"
	kubelessUtils "github.com/kubeless/kubeless/pkg/utils"
	"github.com/kubeless/nats-trigger/pkg/client/clientset/versioned"
	natsUtils "github.com/kubeless/nats-trigger/pkg/utils"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listCmd implements `kubeless trigger nats list` (alias `ls`): it prints a
// table of every NATSTrigger custom resource in the selected namespace.
var listCmd = &cobra.Command{
	Use:     "list FLAG",
	Aliases: []string{"ls"},
	Short:   "list all NATS triggers deployed to Kubeless",
	Long:    `list all NATS triggers deployed to Kubeless`,
	Run: func(cmd *cobra.Command, args []string) {
		ns, err := cmd.Flags().GetString("namespace")
		if err != nil {
			logrus.Fatal(err.Error())
		}
		// Fall back to the kubeconfig's current namespace when --namespace
		// was not given.
		if ns == "" {
			ns = kubelessUtils.GetDefaultNamespace()
		}
		natsClient, err := natsUtils.GetKubelessClientOutCluster()
		if err != nil {
			logrus.Fatalf("Can not create out-of-cluster client: %v", err)
		}
		if err := doList(cmd.OutOrStdout(), natsClient, ns); err != nil {
			logrus.Fatal(err.Error())
		}
	},
}

func init() {
	listCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the NATS trigger")
}

// doList writes a table (name, namespace, topic, function selector) of the
// NATS triggers found in namespace ns to w. It is kept separate from the
// cobra Run handler, taking the client and writer as parameters, so tests
// can inject fakes. Returns the error from the List API call, if any.
func doList(w io.Writer, kubelessClient versioned.Interface, ns string) error {
	triggersList, err := kubelessClient.KubelessV1beta1().NATSTriggers(ns).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	table := uitable.New()
	table.MaxColWidth = 50
	table.Wrap = true
	table.AddRow("NAME", "NAMESPACE", "TOPIC", "FUNCTION SELECTOR")
	for _, trigger := range triggersList.Items {
		table.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Topic, metav1.FormatLabelSelector(&trigger.Spec.FunctionSelector))
	}
	fmt.Fprintln(w, table)
	return nil
}

================================================ FILE: cmd/kubeless/trigger/nats/nats_trigger.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package nats

import (
	"github.com/spf13/cobra"
)

// NATSTriggerCmd command for NATS trigger commands.
// Invoked without a subcommand it only prints its own help; the actual
// operations live in the subcommands attached in init below.
var NATSTriggerCmd = &cobra.Command{
	Use:   "nats SUBCOMMAND",
	Short: "nats trigger specific operations",
	Long:  `nats trigger command allows user to create, list, update, delete NATS triggers running on Kubeless`,
	Run: func(cmd *cobra.Command, args []string) {
		cmd.Help()
	},
}

// init registers the create/delete/list/update/publish subcommands on the
// parent `nats` command.
func init() {
	NATSTriggerCmd.AddCommand(createCmd)
	NATSTriggerCmd.AddCommand(deleteCmd)
	NATSTriggerCmd.AddCommand(listCmd)
	NATSTriggerCmd.AddCommand(updateCmd)
	NATSTriggerCmd.AddCommand(publishCmd)
}

================================================ FILE: cmd/kubeless/trigger/nats/publish.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ package nats import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/nats-io/go-nats" ) var publishCmd = &cobra.Command{ Use: "publish FLAG", Short: "publish message to a topic", Long: `publish message to a topic`, Run: func(cmd *cobra.Command, args []string) { data, err := cmd.Flags().GetString("message") if err != nil { logrus.Fatal(err) } topic, err := cmd.Flags().GetString("topic") if err != nil { logrus.Fatal(err) } url, err := cmd.Flags().GetString("url") if err != nil { logrus.Fatal(err) } err = publishTopic(topic, data, url) if err != nil { logrus.Fatal("Failed to publish message to topic: ", err) } }, } func publishTopic(topic, message, url string) error { nc, err := nats.Connect(url) if err != nil { logrus.Fatal(err) } defer nc.Close() nc.Publish(topic, []byte(message)) nc.Flush() if err := nc.LastError(); err != nil { return err } logrus.Infof("Published [%s] : '%s'\n", topic, message) return nil } func init() { publishCmd.Flags().StringP("message", "", "", "Specify message to be published") publishCmd.Flags().StringP("topic", "", "kubeless", "Specify topic name") publishCmd.Flags().StringP("url", "", "", "Specify NATS server details for e.g nats://localhost:4222)") publishCmd.MarkFlagRequired("url") publishCmd.MarkFlagRequired("topic") publishCmd.MarkFlagRequired("message") } ================================================ FILE: cmd/kubeless/trigger/nats/update.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */

package nats

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

	kubelessUtils "github.com/kubeless/kubeless/pkg/utils"
	natsUtils "github.com/kubeless/nats-trigger/pkg/utils"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// updateCmd implements `kubeless trigger nats update`: it fetches the
// existing NATSTrigger custom resource named by the positional argument,
// overwrites only the fields whose flags were supplied, and pushes the
// modified object back to the cluster (or just prints it with --dryrun).
var updateCmd = &cobra.Command{
	Use:   "update FLAG",
	Short: "Update a NATS trigger",
	Long:  `Update a NATS trigger`,
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) != 1 {
			logrus.Fatal("Need exactly one argument - NATS trigger name")
		}
		triggerName := args[0]
		ns, err := cmd.Flags().GetString("namespace")
		if err != nil {
			logrus.Fatal(err)
		}
		// Fall back to the kubeconfig's current namespace when --namespace
		// was not given.
		if ns == "" {
			ns = kubelessUtils.GetDefaultNamespace()
		}
		natsClient, err := natsUtils.GetKubelessClientOutCluster()
		if err != nil {
			logrus.Fatalf("Can not create out-of-cluster client: %v", err)
		}
		// Read-modify-write: start from the trigger currently stored in the
		// cluster so unspecified fields keep their existing values.
		natsTrigger, err := natsUtils.GetNatsTriggerCustomResource(natsClient, triggerName, ns)
		if err != nil {
			logrus.Fatalf("Unable to find NATS trigger %s in namespace %s. Error %s", triggerName, ns, err)
		}
		topic, err := cmd.Flags().GetString("trigger-topic")
		if err != nil {
			logrus.Fatal(err)
		}
		// Empty flag values mean "leave this field unchanged".
		if topic != "" {
			natsTrigger.Spec.Topic = topic
		}
		functionSelector, err := cmd.Flags().GetString("function-selector")
		if err != nil {
			logrus.Fatal(err)
		}
		if functionSelector != "" {
			labelSelector, err := metav1.ParseToLabelSelector(functionSelector)
			if err != nil {
				logrus.Fatal("Invalid label selector specified " + err.Error())
			}
			natsTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels
		}
		dryrun, err := cmd.Flags().GetBool("dryrun")
		if err != nil {
			logrus.Fatal(err)
		}
		output, err := cmd.Flags().GetString("output")
		if err != nil {
			logrus.Fatal(err)
		}
		// With --dryrun, print the would-be object in the requested format
		// and exit without contacting the API server.
		if dryrun == true {
			res, err := kubelessUtils.DryRunFmt(output, natsTrigger)
			if err != nil {
				logrus.Fatal(err)
			}
			fmt.Println(res)
			return
		}
		err = natsUtils.UpdateNatsTriggerCustomResource(natsClient, natsTrigger)
		if err != nil {
			logrus.Fatalf("Failed to update NATS trigger object %s in namespace %s. Error: %s", triggerName, ns, err)
		}
		logrus.Infof("NATS trigger %s updated in namespace %s successfully!", triggerName, ns)
	},
}

func init() {
	updateCmd.Flags().StringP("namespace", "n", "", "Specify namespace for the NATS trigger")
	updateCmd.Flags().StringP("trigger-topic", "", "", "Specify topic to listen to in NATS")
	updateCmd.Flags().StringP("function-selector", "", "", "Selector (label query) to select function on (e.g. --function-selector key1=value1,key2=value2)")
	updateCmd.Flags().Bool("dryrun", false, "Output JSON manifest of the function without creating it")
	updateCmd.Flags().StringP("output", "o", "yaml", "Output format")
}

================================================ FILE: cmd/kubeless/trigger/trigger.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package trigger

import (
	"github.com/kubeless/kubeless/cmd/kubeless/trigger/cronjob"
	"github.com/kubeless/kubeless/cmd/kubeless/trigger/http"
	"github.com/kubeless/kubeless/cmd/kubeless/trigger/kafka"
	"github.com/kubeless/kubeless/cmd/kubeless/trigger/kinesis"
	"github.com/kubeless/kubeless/cmd/kubeless/trigger/nats"
	"github.com/spf13/cobra"
)

// TriggerCmd contains first-class command for trigger.
// Invoked without a subcommand it only prints its own help; the real work
// is done by the per-event-source subcommands registered in init below.
var TriggerCmd = &cobra.Command{
	Use:   "trigger SUBCOMMAND",
	Short: "trigger specific operations",
	Long:  `trigger command allows user to create, list, update, delete triggers running on Kubeless`,
	Run: func(cmd *cobra.Command, args []string) {
		cmd.Help()
	},
}

// init wires every supported trigger source (cronjob, kafka, http, nats,
// kinesis) in as a subcommand of `kubeless trigger`.
func init() {
	TriggerCmd.AddCommand(cronjob.CronjobTriggerCmd)
	TriggerCmd.AddCommand(kafka.KafkaTriggerCmd)
	TriggerCmd.AddCommand(http.HTTPTriggerCmd)
	TriggerCmd.AddCommand(nats.NATSTriggerCmd)
	TriggerCmd.AddCommand(kinesis.KinesisTriggerCmd)
}

================================================ FILE: cmd/kubeless/version/version.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ package version import ( "fmt" "github.com/kubeless/kubeless/pkg/version" "github.com/spf13/cobra" ) // VersionCmd contains first-class command for version var VersionCmd = &cobra.Command{ Use: "version", Short: "Print the version of Kubeless", Long: ``, Run: func(cmd *cobra.Command, args []string) { fmt.Println("Kubeless version: " + version.Version) }, } ================================================ FILE: docker/controller-manager ================================================ [File too large to display: 24.0 MB] ================================================ FILE: docker/dev-environment/Dockerfile ================================================ FROM docker:17.11.0-ce-dind ENV GOPATH=/go ENV PATH=$GOPATH/bin:/usr/local/go/bin:/usr/local/bats/bin:$PATH \ CGO_ENABLED=0 # Install packages that requires persistence RUN set -eux; \ apk add --no-cache \ bash \ git \ make \ sudo \ gcc \ musl-dev \ openssl \ ca-certificates \ zip \ curl \ go && \ # Install kubectl KUBECTL_VERSION=$(wget -qO- https://storage.googleapis.com/kubernetes-release/release/stable.txt) && \ wget "https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl" -O "/usr/local/bin/kubectl" && \ chmod +x /usr/local/bin/kubectl && \ # Install gox and golint go get github.com/mitchellh/gox github.com/golang/lint/golint && \ # Install bats git clone --depth 1 https://github.com/sstephenson/bats /usr/local/bats && \ # Install kubecfg wget "https://github.com/ksonnet/kubecfg/releases/download/v0.5.0/kubecfg-linux-amd64" -O "/usr/local/bin/kubecfg" && chmod +x "/usr/local/bin/kubecfg" WORKDIR $GOPATH ADD ./entry-point.sh / ENTRYPOINT [ "/entry-point.sh" ] ================================================ FILE: docker/dev-environment/entry-point.sh ================================================ #!/bin/bash if [ ! -d "$GOPATH/src/github.com/kubeless/kubeless" ]; then echo "Kubeless directory not found" exit 1 fi if [ ! 
-d "$GOPATH/src/github.com/kubeless/kubeless/ksonnet-lib" ]; then # Ksonnet-lib is required in the same folder than Kubeless git clone --depth=1 https://github.com/ksonnet/ksonnet-lib.git "$GOPATH/src/github.com/kubeless/kubeless/ksonnet-lib" fi export KUBECFG_JPATH="$GOPATH/src/github.com/kubeless/kubeless/ksonnet-lib" dockerd > /dev/null 2>&1 & cd "$GOPATH/src/github.com/kubeless/kubeless" "$@" ================================================ FILE: docker/event-sources/kubernetes/Dockerfile ================================================ FROM bitnami/minideb:jessie RUN install_packages python3 curl ca-certificates git RUN curl https://bootstrap.pypa.io/get-pip.py --output get-pip.py RUN python3 ./get-pip.py RUN pip3 install --no-cache-dir --upgrade kubernetes RUN pip3 install --no-cache-dir --upgrade requests RUN git clone --depth 1 https://github.com/dpkp/kafka-python WORKDIR kafka-python RUN python3 ./setup.py install WORKDIR / ADD events.py . CMD ["python3", "/events.py"] ================================================ FILE: docker/event-sources/kubernetes/README.md ================================================ # Container to feed k8s events to kafka `events.py` is a Python 3.4 script, that uses `asyncio` and the Kubernetes python client plus a Kafka client to watch for k8s events and send those events onto the kubeless Kafka _k8s_ topic. The Dockerfile just builds an image to start this as a deployment in a k8s cluster running kubeless. 
## Usage Create the `k8s` topic in kubeless: ``` kubeless topic create k8s ``` Then launch the event sync ``` kubectl run event --image=skippbox/k8s-events:0.10.12 ``` ================================================ FILE: docker/event-sources/kubernetes/events.py ================================================ import asyncio import logging import json from kubernetes import client, config, watch from kafka import KafkaProducer from kafka.errors import KafkaError logger = logging.getLogger('k8s_events') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) #config.load_kube_config() config.load_incluster_config() v1 = client.CoreV1Api() v1ext = client.ExtensionsV1beta1Api() producer=KafkaProducer(bootstrap_servers='kafka.kubeless:9092',value_serializer=lambda v: json.dumps(v).encode('utf-8')) @asyncio.coroutine def pods(): w = watch.Watch() for event in w.stream(v1.list_pod_for_all_namespaces): logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name)) msg = {'type':event['type'],'object':event['raw_object']} producer.send('k8s', msg) producer.flush() yield from asyncio.sleep(0.1) @asyncio.coroutine def namespaces(): w = watch.Watch() for event in w.stream(v1.list_namespace): logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name)) msg = {'type':event['type'],'object':event['raw_object']} producer.send('k8s', msg) producer.flush() yield from asyncio.sleep(0.1) @asyncio.coroutine def services(): w = watch.Watch() for event in w.stream(v1.list_service_for_all_namespaces): logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name)) producer=KafkaProducer(bootstrap_servers='kafka.kubeless:9092',value_serializer=lambda v: json.dumps(v).encode('utf-8')) msg = 
{'type':event['type'],'object':event['raw_object']} producer.send('k8s', msg) producer.flush() yield from asyncio.sleep(0.1) @asyncio.coroutine def deployments(): w = watch.Watch() for event in w.stream(v1ext.list_deployment_for_all_namespaces): logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name)) msg = {'type':event['type'],'object':event['raw_object']} producer.send('k8s', msg) producer.flush() yield from asyncio.sleep(0.1) @asyncio.coroutine def replicasets(): w = watch.Watch() for event in w.stream(v1ext.list_replica_set_for_all_namespaces): logger.info("Event: %s %s %s" % (event['type'], event['object'].kind, event['object'].metadata.name)) msg = {'type':event['type'],'object':event['raw_object']} producer.send('k8s', msg) producer.flush() yield from asyncio.sleep(0.1) ioloop = asyncio.get_event_loop() ioloop.create_task(pods()) ioloop.create_task(namespaces()) ioloop.create_task(services()) ioloop.create_task(deployments()) ioloop.create_task(replicasets()) try: # Blocking call interrupted by loop.stop() print('step: loop.run_forever()') ioloop.run_forever() except KeyboardInterrupt: pass finally: print('step: loop.close()') ioloop.close() ================================================ FILE: docker/function-controller/Dockerfile ================================================ FROM bitnami/minideb:jessie RUN install_packages ca-certificates ADD kubeless-function-controller /kubeless-function-controller ENTRYPOINT ["/kubeless-function-controller"] ================================================ FILE: docker/function-image-builder/Dockerfile ================================================ FROM fedora:27 RUN dnf install -y skopeo nodejs ADD imbuilder / ADD entrypoint.sh / ENTRYPOINT [ "/entrypoint.sh" ] ================================================ FILE: docker/function-image-builder/entrypoint.sh ================================================ #!/bin/bash # Copyright (c) 2016-2017 Bitnami # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e # Kubernetes ImagePullSecrets uses .dockerconfigjson as the file name # for storing credentials but skopeo requires it to be named config.json if [ -f $DOCKER_CONFIG_FOLDER/.dockerconfigjson ]; then echo "Creating $HOME/.docker/config.json" mkdir -p $HOME/.docker ln -s $DOCKER_CONFIG_FOLDER/.dockerconfigjson $HOME/.docker/config.json fi "${@}" ================================================ FILE: docker/runtime/README.md ================================================ Kubeless Runtimes has been migrated to it's own repository. You can find them here: https://github.com/kubeless/runtimes/ If you are interested in creating a new runtime please follow the instructions here: https://kubeless.io/docs/implementing-new-runtime/ ================================================ FILE: docker/unzip/Dockerfile ================================================ FROM bitnami/minideb RUN install_packages unzip curl ca-certificates tar gzip bzip2 xz-utils ================================================ FILE: docs/GKE-deployment.md ================================================ # Deploying Kubeless to Google Kubernetes Engine (GKE) This guide goes over the required steps for deploying Kubeless in GKE. There are a few pain points that you need to know in order to successfully deploy Kubeless in a GKE environment. First your google cloud account should have enough privileges to create and manage clusters. 
You can login to your account using the `gcloud` CLI tool: ```console $ gcloud auth login Go to the following link in your browser: https://accounts.google.com/o/oauth2/auth?redirect_uri=... Enter verification code: ... You are now logged in as [your@mail.com]. Your current project is [your-project]. You can change this setting by running: $ gcloud config set project PROJECT_ID ``` You can also follow the initialization process executing `gcloud init`. ## Creating a cluster Once you are logged in, you can create the cluster: ```console gcloud container clusters create \ --cluster-version=1.8.10-gke.0 \ my-cluster \ --num-nodes 5 ``` At the moment of writing this document, the CI/CD system is testing Kubeless against GKE 1.8 so that's the one we are specifying as the desired version. You can check the current version tested in [the Travis file](../.travis.yml). The default number of nodes is 3. That default number is enough for small deployments but it is recommended to use at least 5 or 7 nodes so you don't run out of resources after deploying a few functions. After a few minutes you should be able to see your cluster running: ```console $ gcloud container clusters list NAME ... STATUS my-cluster ... RUNNING ``` ## Creating the admin clusterrolebinding For deploying Kubeless in your cluster, your user should have enough permissions for creating cluster roles and cluster role bindings. For doing so you need to give your current GKE account admin privileges in the new cluster. This is not being done by default so you need to do it manually: ```console kubectl create clusterrolebinding kubeless-cluster-admin --clusterrole=cluster-admin --user= ``` The above command may fail with: ```console Error from server (Forbidden): User "your-gke-user" cannot create clusterrolebindings.rbac.authorization.k8s.io at the cluster scope ``` This error is shown since your account doesn't have privileges to create `clusterrolebindings` (even if you are able to create clusters). 
If that is the case you can still perform the above operation using the default `admin` user. You can retrieve the admin password executing: ```console gcloud container clusters describe my-cluster --zone ``` Once you have the admin password you can retry the command above: ```console kubectl --username=admin --password= \ create clusterrolebinding kubeless-cluster-admin \ --clusterrole=cluster-admin --user= ``` ## Deploying Kubeless After that your are finally able to deploy Kubeless. Get the latest release from the [release page](https://github.com/kubeless/kubeless/releases) and deploy the RBAC version of the Kubeless manifest. ## Kubeless on GKE 1.8.x with Alpha features On GKE 1.8.x, when you have finished the above steps, there is still one step required to make the Kafka/Zookeeper PVC bounded if you enable alpha features when creating your cluster. Checking PVC you will see they are pending: ``` kubectl get pvc -n kubeless NAME STATUS VOLUME CAPACITY ACCESSMODES STORAGECLASS AGE datadir-kafka-0 Pending 2m zookeeper-zoo-0 Pending 2m ``` Because there are no correlative PV available, you have to create them. On GKE, you might want to go with [GKE Persistent Disk](https://kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk). 
First, create two PD with this command: ```console gcloud compute disks create --size=1GB --zone= kubeless-kafka gcloud compute disks create --size=1GB --zone= kubeless-zookeeper ``` Then create Kafka and Zookeeper PV: ```console kubectl create -f docs/misc/kafka-pv.yaml kubectl create -f docs/misc/zookeeper-pv.yaml ``` Once both PV are created, the PVC will be bounded shortly and you will see Kafka and Zookeeper running: ```console kubectl get pod -n kubeless NAME READY STATUS RESTARTS AGE kafka-0 1/1 Running 1 30m kubeless-controller-659755588f-bwch6 1/1 Running 0 30m zoo-0 1/1 Running 0 30m ``` ================================================ FILE: docs/README.md ================================================ # Kubeless Docs This folder holds the documentation that is served in [https://kubeless.io/docs](https://kubeless.io/docs). > Note:This folder may contain changes that has not been released yet. To get the latest features available click in the link above. ================================================ FILE: docs/advanced-function-deployment.md ================================================ # Deploying Kubeless Functions using Kubernetes API Apart from using the `kubeless` CLI, it is possible to deploy Kubeless Functions directly using the Kubernetes API and creating Function objects. A minimal Function might look like: ```yaml apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: get-python namespace: default label: created-by: kubeless function: get-python spec: runtime: python2.7 timeout: "180" handler: helloget.foo deps: "" checksum: sha256:d251999dcbfdeccec385606fd0aec385b214cfc74ede8b6c9e47af71728f6e9a function-content-type: text function: | def foo(event, context): return "hello world" ``` The fields that a Function specification can contain are: - Runtime: Runtime ID and version that the function will use. It should match one of the availables in the [Kubeless configuration](/docs/function-controller-configuration). 
- Timeout: Maximum timeout for the given function. After that time, the function execution will be terminated. - Handler: Pair of `.`. When using `zip` or `compressedtar` in `function-content-type`, the `` will be used to find the file with the function to expose. In other cases, it will be used just as a final file name. `` is used to select the function to run from the exported functions of ``. This field is mandatory and should match with an exported function. - Deps: Dependencies of the function. The format of this field will depend on the runtime, e.g. a `package.json` for NodeJS functions or a `Gemfile` for Ruby. - Checksum: SHA256 of the function content. - Function content type: Content type of the function. Current supported values are `base64`, `url` or `text`. If the content is zipped, the suffix `+zip` should be added. If the content is a gzip/bzip2/xz compressed tar file, the suffix `+compressedtar` should be added. - Function: Function content. Apart from the basic parameters, it is possible to add the specification of a `Deployment`, a `Service` or an `Horizontal Pod Autoscaler` that Kubeless will use to generate them. ## Pod Anti Affinity By default, a kubless generated `Deployment` will include a soft pod anti-affinity rule that will signal to kubernetes that it should try to deploy pods to different nodes. This behaviour can be overridden using a deployment template. ## Deploying large functions As any Kubernetes object, function objects have a maximum size of 1.5MiB (due to the [maximum size](https://github.com/etcd-io/etcd/blob/master/Documentation/dev-guide/limit.md#request-size-limit) of an etcd entry). Because of that, it's not possible to specify in the `function` field of the YAML content that surpasses that size. To workaround this issue it's possible to specify an URL in the `function` field. This file will be downloaded at build time (extracted if necessary) and the checksum will be checked. 
Doing this we avoid any limitation regarding the file size. It's also possible to include the function dependencies in this file and skip the dependency installation step. Note that since the file will be downloaded in a pod the URL should be accessible from within the cluster: ```yaml checksum: sha256:d1f84e9f0a8ce27e7d9ce6f457126a8f92e957e5109312e7996373f658015547 function: https://github.com/kubeless/kubeless/blob/master/examples/nodejs/helloFunctions.zip?raw=true function-content-type: url+zip ``` ## Functions with bundled deps file Since the dependencies file(for python runtime: ``requirement.txt``) will become long and difficult to put into kubernetes object as function getting complex, Kubeless support use the deps file in remote zip file with function. Usage: - 1.Compress your function and dependencies file(in this case: ``requirement.txt``) into a zip file - 2.add ``+deps`` into ``function-content-type`` ```yaml checksum: sha256:d1f84e9f0a8ce27e7d9ce6f457126a8f92e957e5109312e7996373f658015547 function: https://github.com/kubeless/kubeless/blob/master/examples/nodejs/hellowithbundleddeps.zip?raw=true function-content-type: url+zip+deps ``` ## Custom Deployment It is possible to specify a [`Deployment` spec](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#creating-a-deployment) in the Function spec that will be merged with default values set by the Kubeless controller. It is not necessary to specify all the fields of the deployment, just the fields you are interested on overwriting. For example: ```yaml apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: get-python ... spec: ... 
deployment: spec: template: spec: initContainers: - resources: limits: cpu: 200m memory: 200Mi requests: cpu: 200m memory: 200Mi containers: - env: - name: FOO value: bar name: "" resources: limits: cpu: 100m memory: 100Mi requests: cpu: 100m memory: 100Mi volumeMounts: - mountPath: /my_secret name: my-secret-vol volumes: - name: my-secret-vol secret: secretName: my-secret ``` Would create a function with the environment variable `FOO`, using CPU and memory limits and mounting the secret `my-secret` as a volume. Note that you can also specify a default template for a Deployment spec in the [controller configuration](/docs/function-controller-configuration). The resource configuration in `initContainers` will be applied to all of the initial containers in the target deployment (like `provision`, `compile` etc.) ## Custom Service As with a deployment, it is possible to specify custom values for a [Service](https://kubernetes.io/docs/concepts/services-networking/service). This would be an example: ```yaml apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: get-python ... spec: ... service: clusterIP: None ports: - name: http-function-port port: 9090 protocol: TCP targetPort: 9090 selector: created-by: kubeless function: get-python type: ClusterIP ``` The example above will create a headless service running in the port 9090. ## Horizontal Pod Autoscaler For configuring the [autoscale feature](/docs/autoscaling) it is possible to attach an [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to a function: ```yaml apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: get-python ... spec: ... 
horizontalPodAutoscaler: apiVersion: autoscaling/v2beta1 kind: HorizontalPodAutoscaler metadata: name: get-python namespace: default spec: maxReplicas: 3 metrics: - resource: name: cpu targetAverageUtilization: 70 type: Resource minReplicas: 1 scaleTargetRef: apiVersion: apps/v1beta1 kind: Deployment name: get-python ``` The above specification will create a Horizontal Pod Autoscaler using CPU metrics. ================================================ FILE: docs/architecture.md ================================================ # Kubeless architecture This doc covers the architectural design of Kubeless and directory structure of the repository. ## Concepts Kubeless is built around below core concepts: - Functions - Triggers - Runtime ### Functions A _Function_ is representation of the code to be executed. Along with the code a _Function_ contains metadata about its runtime dependencies, build instructions etc. A _Function_ has a independent life-cycle. The following methods are supported: * Deploy - Deploy function as function instances. This step may involve building the function image or re-use pre-generated image and deploying it on the cluster. * Execute - Invoke a function directly i.e) not through any event source * Get - Return the function metadata and spec * Update - Modify the function specification and its metadata * Delete - Delete a function, and clean up any resource provisioned for the function from the cluster * List - Show the list of functions and their metadata * Logs - Return the logs generated by a function ### Triggers A _Trigger_ represents an event source for the functions associated to it. When an event occurs in the event source, Kubeless will ensure that the associated functions are invoked **at most once**. A Trigger can be associated to a single function or to several ones depending on the event source type. 
They are decoupled from the life-cycle of functions and can be independently operated with the following methods: * Create - Create a new trigger with details on event source and associated functions * Update - Modify the trigger specification * Delete - Delete a trigger, and clean up any resource provisioned for the trigger * List - Show the list of trigger and their specification ### Runtime A _Runtime_ represents language and runtime specific environment in which function will be executed. Please see [runtimes](/docs/runtimes) for more details. ## Design Kubeless leverages multiple concepts of Kubernetes in order to support functions deployed on top of it. In details, we have been using: - A Custom Resource Definitions (CRD) is used to represent function - Each event source is modeled as a separate Trigger CRD object - Separate Custom Resource Definitions controller to handle CRUD operations corresponding to CRD object - Deployment / Pod to run the corresponding runtime. - Configmap to inject function's code into the runtime pod. - Init-container to load the dependencies that function might have. - Service to expose function. - Ingress resources to expose functions externally Use of Kubernetes CRD's and CRD controllers forms the core design tenet of Kubeless. Use of separate CRD's for functions and triggers provides clear separation of concerns. Use of separate CRD controllers keeps the code decoupled and modular. ### Functions When you install kubeless, there is a CRD `functions.kubeless.io` created to represent _Function_: ```yaml $ kubectl get customresourcedefinition functions.kubeless.io -o yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: ... name: functions.kubeless.io ... 
selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/functions.kubeless.io spec: group: kubeless.io names: kind: Function listKind: FunctionList plural: functions singular: function scope: Namespaced version: v1beta1 ``` Then function custom objects will be created under this CRD endpoint. A function object looks like this: ```yaml $ kubectl get function get-python -o yaml apiVersion: kubeless.io/v1beta1 kind: Function metadata: clusterName: "" creationTimestamp: 2018-03-25T19:13:07Z finalizers: - kubeless.io/function generation: 0 labels: created-by: kubeless function: get-python name: get-python namespace: default resourceVersion: "9219" selfLink: /apis/kubeless.io/v1beta1/namespaces/default/functions/get-python uid: 8d25a793-3060-11e8-ad89-08002730c417 spec: checksum: sha256:d251999dcbfdeccec385606fd0aec385b214cfc74ede8b6c9e47af71728f6e9a deployment: metadata: creationTimestamp: null spec: strategy: {} template: metadata: creationTimestamp: null spec: containers: - name: "" resources: {} status: {} deps: "" function: | def foo(event, context): return "hello world" function-content-type: text handler: helloget.foo horizontalPodAutoscaler: metadata: creationTimestamp: null spec: maxReplicas: 0 scaleTargetRef: kind: "" name: "" status: conditions: null currentMetrics: null currentReplicas: 0 desiredReplicas: 0 runtime: python2.7 service: ports: - name: http-function-port port: 8080 protocol: TCP targetPort: 8080 selector: created-by: kubeless function: get-python type: ClusterIP timeout: "180" ``` `function.spec` contains function's details like code, handler, runtime and probably its dependency file etc. Kubeless ships with a CRD controller named `function-controller` which continuously watches changes to function objects and reacts accordingly. By default function-controller is installed in `kubeless-controller-manager` deployment which is deployed into `kubeless` namespace. 
Function-controller watches for create events corresponding to creation of _Function_ object. Function-controller creates a deployment for the function, and exposes the function as a clusterIP service. Both deployment and service resources created for the function can be controlled by the function creator by explicitly specifying deployment spec and service spec respectively in the `function.spec`. Runtime image used for the function deployment could be chosen by one of the below options: * User explicitly specifies custom runtime image to be used for the function * Image artifact is generated on the fly by the function builder * A pre-built image is used for each language and version combination. A configmap is used to inject the function code from function.spec.function into the corresponding k8s runtime pod. Function-controller on receiving Function CRD object deletion event, cleans up all the resources (deployment, service, configmap etc) provisioned. ### HTTP triggers When you install kubeless, there is a CRD `httptriggers.kubeless.io` created to represent HTTP triggers: ```console $ kubectl get customresourcedefinition httptriggers.kubeless.io -o yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: httptriggers.kubeless.io resourceVersion: "102" selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/httptriggers.kubeless.io uid: 0aa4a346-2ff4-11e8-ad89-08002730c417 spec: group: kubeless.io names: kind: HTTPTrigger listKind: HTTPTriggerList plural: httptriggers singular: httptrigger scope: Namespaced version: v1beta1 ``` HTTP trigger custom objects will be created under `httptriggers.kubeless.io` CRD endpoint. 
An example HTTP trigger object looks like this: ```console $ kubectl get httptrigger get-python -o yaml apiVersion: kubeless.io/v1beta1 kind: HTTPTrigger metadata: labels: created-by: kubeless name: get-python namespace: default spec: function-name: get-python host-name: get-python.192.168.99.100.nip.io ingress-enabled: true path: func tls: false ``` HTTP trigger object spec contains below fields: * function-name - name of the associated function that needs to be invoked when URL corresponding to http trigger is accessed * host-name - name used for virtual hosting * path - route requests with this path to function service * tls - true if TLS is to be enabled `kubeless-controller-manager` ships with http trigger CRD controller which watches for the HTTP trigger CRD objects and configures Kubernetes ingress accordingly. ### Cronjob triggers ```console $ kubectl get customresourcedefinition cronjobtriggers.kubeless.io -o yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: cronjobtriggers.kubeless.io selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/cronjobtriggers.kubeless.io spec: group: kubeless.io names: kind: CronJobTrigger listKind: CronJobTriggerList plural: cronjobtriggers singular: cronjobtrigger scope: Namespaced version: v1beta1 ``` Cronjob trigger custom objects will be created under `cronjobtriggers.kubeless.io` CRD endpoint. An example Cronjob trigger object looks like this: ```console $ kubectl get cronjobtrigger scheduled-get-python -o yaml apiVersion: kubeless.io/v1beta1 kind: CronJobTrigger metadata: labels: created-by: kubeless function: scheduled-get-python name: scheduled-get-python namespace: default spec: function-name: scheduled-get-python schedule: '* * * * *' ``` Cronjob trigger object spec contains below fields: * function-name - name of the associated function that needs to be invoked periodically as per specified * schedule - it takes a Cron format string, e.g. 
0 * * * * or @hourly, as schedule time of its jobs to be created and executed. `kubeless-controller-manager` ships with Cronjob trigger CRD controller which watches for the Cronjob trigger CRD objects and configures Kubernetes cronjobs to run the functions at scheduled intrerval time. ### Kafka triggers ```console kubectl get customresourcedefinition kafkatriggers.kubeless.io -o yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: kafkatriggers.kubeless.io selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/kafkatriggers.kubeless.io uid: 0aa3988f-2ff4-11e8-ad89-08002730c417 spec: group: kubeless.io names: kind: KafkaTrigger listKind: KafkaTriggerList plural: kafkatriggers singular: kafkatrigger scope: Namespaced version: v1beta1 ``` Kafka trigger custom objects will be created under `kafkatriggers.kubeless.io` CRD endpoint. An example Kafka trigger object looks like this: ```console $ kubectl get kafkatrigger s3-python-kafka-trigger -o yaml apiVersion: kubeless.io/v1beta1 kind: KafkaTrigger metadata: labels: created-by: kubeless name: s3-python-kafka-trigger namespace: default spec: functionSelector: matchLabels: created-by: kubeless topic: s3-python topic: s3-python ``` Kafka trigger object spec contains below fields: * functionSelector - label selector that selects list of matching functions * topic - Kafka topic messages to which the functions associated must be invoked. ## Kubeless command-line client Together with `kubeless-controller-manager`, we provide `kubeless` cli which enables users to interact with Kubeless system. At this moment, Kubeless cli provides these below actions: ```console $ kubeless --help Serverless framework for Kubernetes Usage: kubeless [command] Available Commands: autoscale Manage autoscale to function on Kubeless completion Output shell completion code for the specified shell. 
function Function specific operations get-server-config Print the current configuration of the controller help Help about any command topic Manage message topics in Kubeless trigger Trigger specific operations version Print the version of Kubeless Flags: -h, --help help for kubeless Use "kubeless [command] --help" for more information about a command. ``` ## Implementation Kubeless controller is written in Go programming language, and uses the Kubernetes client-go to interact with the Kubernetes apiserver. Kubeless CLI is written in Go as well, using the popular cli library `github.com/spf13/cobra`. Basically it is a bundle of HTTP requests and kubectl commands. We send http requests to the Kubernetes apiserver in order to 'crud' CRD objects. Checkout [the cmd folder](https://github.com/kubeless/kubeless/tree/master/cmd/kubeless) for more details. ## Directory structure In order to help you getting a better feeling before you start diving into the project, we would give you the 10,000 feet view of the source code directory structure. - chart: chart to deploy Kubeless with Helm. - cmd: contains kubeless cli implementation and kubeless-controller. - docker: contains artifacts for building the kubeless-controller and runtime images. - docs: contains documentations. - examples: contains some samples of running function with kubeless. - manifests: collection of manifests for additional features. - pkg: contains shared packages. - script: contains build scripts. - vendor: contains dependencies packages. ================================================ FILE: docs/autoscaling.md ================================================ # Autoscaling function deployment in Kubeless This document gives you an overview of how we do autoscaling for functions in Kubeless and also give you a walkthrough how to configure it for custom metric. 
## Overview Kubernetes introduces [HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) for pod autoscaling. In kubeless, each function is deployed into a separate Kubernetes deployment, so naturally we leverage HPA to automatically scale function based on defined workload metrics. If you're on Kubeless CLI, this below command gives you an idea how to setup autoscaling for deployed function: ```console $ kubeless autoscale --help autoscale command allows user to list, create, delete autoscale rule for function on Kubeless Usage: kubeless autoscale SUBCOMMAND [flags] kubeless autoscale [command] Available Commands: create automatically scale function based on monitored metrics delete delete an autoscale from Kubeless list list all autoscales in Kubeless Flags: -h, --help help for autoscale Use "kubeless autoscale [command] --help" for more information about a command. ``` Once you create an autoscaling rule for a specific function (with `kubeless autoscale create`), the corresponding HPA object will be added to the system which is going to monitor your function and auto-scale its pods based on the autoscaling rule you define in the command. The default metric is CPU, but you have option to do autoscaling with custom metrics. At this moment, Kubeless supports `qps` which stands for number of incoming requests to function per second. ```console $ kubeless autoscale create --help automatically scale function based on monitored metrics Usage: kubeless autoscale create FLAG [flags] Flags: -h, --help help for create --max int32 maximum number of replicas (default 1) --metric string metric to use for calculating the autoscale. Supported metrics: cpu, qps (default "cpu") --min int32 minimum number of replicas (default 1) -n, --namespace string Specify namespace for the autoscale --value string value of the average of the metric across all replicas. If metric is cpu, value is a number represented as percentage. 
If metric is qps, value must be in format of Quantity ``` The below part will walk you though setup need to be done in order to make function auto-scaled based on `qps` metric. ## Autoscaling based on CPU usage To autoscale based on CPU usage, it is *required* that your function has been deployed with CPU request limits. To do this, use the `--cpu` parameter when deploying your function. Please see the [Meaning of CPU](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) for the format of the value that should be passed. ### Further reading [Custom Metrics API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/custom-metrics-api.md) [Support for custom metrics](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics) ================================================ FILE: docs/building-functions.md ================================================ # Build process for functions > **Warning**: This feature is still under heavy development Kubeless includes a way of building and storing functions as docker images. This can be used to: - Persist function: Functions now become docker images that can be safely stored in a docker registry. - Speed up the process of redeploying the same function. This is specifically useful for scaling up your function. - Generate immutable function deployments. Once a function image is generated, the same image will be used every time the function is used. ### [Optional] Start a Docker registry It is possible to use the Docker Hub to store your functions but if you want your functions to be private it is necessary to deploy a different Docker registry. In case you want to use the Docker Hub or if you already have a private Docker Registry jump to the [setup section](#setup-the-build-process). 
Otherwise, if you are working with Minikube in a testing environment, you can still deploy a registry
In order to do so, `kubectl` has a utility that allows you to create this secret in just one command:
## Build process The following diagram represents the building process: ![Build Process](./img/build-process.png) When a new function is created the Kubeless Controller generates two items: - A [Kubernetes job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that will use the registry credentials to push a new image under the `user` repository. It will use the checksum (SHA256) of the function specification as tag so any change in the function will generate a different image. - A Pod to run the function. This pod will wait until the previous job finishes in order to pull the function image. ## Known limitations - It is only possible to use a single registry to pull images and push them so if the build system is used with a registry different than https://index.docker.io/v1/ (the official one) the images present in the Kubeless ConfigMap should be copied to the new registry. - Base images are not currently cached, that means that every time a new build is triggered it will download the base image. ================================================ FILE: docs/cronjob-triggers.md ================================================ # Scheduling the trigger of a function Kubeless has its own CronJobTrigger which uses [Kubernetes CronJob](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/) to trigger your function in a given schedule. On this page, we're going to cover how to use it, and some basic features. ## Creating a new CronJobTrigger You can create a new cron trigger using `kubeless-cli`. In this section, we're going to show you how to create a simple function that logs `Hello world!` every 1 minute. 
For this example you're going to need the following tools: * [Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) * [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) * [Kubeless CLI](/docs/quick-start/) After installing all the requirements, you can proceed to the step-by-step guide: ### Step 1: Create a new Minikube cluster In this step, you're going to create a new Minikube cluster called `kubeless`, where you're going to deploy the function triggers. You can run the following command on your shell: ```shell minikube start -p kubeless ``` **IMPORTANT:** If you have already created any Minikube cluster called `kubeless` you should delete it first, with `minikube delete -p kubeless` ### Step 2: Install Kubeless on your cluster Now that you have a Minikube cluster running, you can run the following command to install the latest version of Kubeless: ```shell RELEASE=$(curl -s https://api.github.com/repos/kubeless/kubeless/releases/latest | grep tag_name | cut -d '"' -f 4) && \ kubectl create ns kubeless && \ kubectl create -f https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml ``` ### Step 3: Deploy a test function For this CronJob test, we're going to use a simple function that just logs a "Hello world!" message. 
Since this isn't a tutorial explaining how to deploy a function you can just run the following command: ```shell kubectl apply -f https://gist.githubusercontent.com/delucca/1f3a71b7ff05f31d492dc5bfd3f3afba/raw/5237991f018f99a697e937a85e60e57dd8ac1a1c/function.yaml ``` ### Step 4: Create a new CronJob trigger To create a CronJob trigger with `kubeless-cli` you can run the following command: ```shell kubeless trigger cronjob create \ cron-test-hello-world \ --function cron-test-hello-world \ --schedule "*/1 * * * *" ``` About the provided arguments: * **The first argument** must be the trigger name you want to use * **--function** should be the name of the function you want to trigger with that cron * **--schedule** the cron pattern to trigger your function ### Step 5: Take a look on your function logs Now, wait 1 or 2 minutes and take a look at your function logs with this command: ```shell kubeless function logs cron-test-hello-world ``` You should see some `Hello world!` logs, showing that our CronJob is working as expected. ## Advanced concepts In this section, we're going to cover some advanced concepts regarding the CronJob trigger. Each item in this section will cover a given feature that you can use on your triggers. ### Passing payload data to the function While triggering a function you could pass also a payload data to it. Those will be available on `event.data` (like any other request data). You can do so with the following command: ```shell kubeless trigger cronjob (create or update) --payload ``` If you're not willing to provide a stringified JSON to the `--payload` argument, you can use `--payload-from-file` instead and pass a file path. You can provide files on the following extensions: * `.json` * `.yaml` **IMPORTANT:** Your payload must be an object, so you cannot provide a JSON array to it, but you can add a key on your object that can contain a list of items instead. 
================================================ FILE: docs/debug-functions.md ================================================ # Debug Kubeless Functions In this document we will show how you can debug your function in order to spot possible errors. There could be several reasons that causes a wrong deployment. For learning how to successfully debug a function it is important to know what is the process of deploying a Kubeless function. In this guide we are going to assume that you are using the `kubeless` CLI tool to deploy your functions. If that is the case, this is the process to run a function: 1. The `kubeless` CLI read the parameters you give to it and produces a [Function](/docs/advanced-function-deployment) object that submits to the Kubernetes API server. 2. The Kubeless Function Controller detects that a new `Function` has been created and reads its content. From the function content it generates: a `ConfigMap` with the function code and its dependencies, a `Service` to make the function reachable through HTTP and a `Deployment` with the base image and all the required steps to install and run your functions. It is important to know this order because if the controller fails to deploy the `ConfigMap` or the `Service` it will never create the `Deployment`. A failure in any step will abort the process. 3. Once the `Deployment` has been created a `Pod` should be generated with your function. When a Pod starts it dinamically reads the content of your function (in case of interpreted languages). After all the above you are ready to call your function. Let's see some common mistakes and how to fix them. ## "kubeless function deploy" fails The first failure that can appear is an error in the parameters that we give to the `kubeless function deploy` command. 
Fortunately, these errors are pretty easy to debug:
If our function doesn't start we should check the status of the pods executing: ``` $ kubectl get pods -l function=foo ``` ### Function pod crashes with Init:CrashLoopBackOff If our function fails with an `Init` error that could mean that: - It fails to retrieve the function content. - It fails to install dependencies. - It fails to compile our function (in compiled languages). For any of the above we should first identify which container is failing (since each step is performed in a different container): ```console $ kubectl get pods -l function=foo NAME READY STATUS RESTARTS AGE foo-74978bbf45-9xb4p 0/1 Init:CrashLoopBackOff 1 6m $ kubectl get pods -l function=foo -o yaml ... name: install ready: false restartCount: 2 ... ``` From the above we can see that the container `install` is the one with the problem. Depending on the runtime the logs of the container will be shown as well so we can directly spot the issue. Unfortunately that is not always the case, so let's manually retrieve the logs of the `install` container: ```console $ kubectl logs foo-74978bbf45-9xb4p -c install --previous ... Collecting twiter (from -r /kubeless/requirements.txt (line 1)) Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution',)': /simple/twiter/ ``` Now we can spot that the problem is a typo in our requirements: `twiter` should be `twitter`. ### Function pod crashes with CrashLoopBackOff In the case the Pod remains in that state we should retrieve the logs of the runtime container: ```console $ kubectl get pods -l function=bar NAME READY STATUS RESTARTS AGE bar-7d458f6d7c-2gsh7 0/1 CrashLoopBackOff 7 15m $ kubectl logs -l function=bar kubectl logs -l function=bar Traceback (most recent call last): ...
File "/kubeless/hello.py", line 2 return Hello world ^ SyntaxError: invalid syntax ``` We can see that we have a syntax error: `return Hello world` should be modified with `return "Hello world"`. ### Function returns an "Internal Server Error" There will be cases in which the pod doesn't crash but the function returns an error: ```console $ kubectl get pods -l function=test NAME READY STATUS RESTARTS AGE test-6845ff45cb-6q865 1/1 Running 0 1m $ kubeless function call test --data '{"username": "test"}' ERRO[0000] FATA[0000] an error on the server ("Internal Server Error") has prevented the request from succeeding ``` This usually means that the function is syntactically correct but it has a bug. Again for spotting the issue we should check the function logs: ```console $ kubectl logs -l function=test ... [27/Apr/2018:15:45:33 +0000] "GET /healthz HTTP/1.1" 200 2 "-" "kube-probe/." Function failed to execute: TypeError: Cannot read property 'name' of undefined at handler (/kubeless/hello.js:3:39) ... ``` We can see that it is raising an error in the line 3 of our function: ```js module.exports = { handler: (event, context) => { return "Hello " + event.data.user.name; }, }; ``` We are trying to access the property `name` of the property `user` while we are giving the function `username` instead. ## Conclusion These are just some tips to quickly identify what's gone wrong with a function. If after checking the controller and function logs (or any other information that Kubernetes may provide) you are not able to spot the error you can open an [Issue in our GitHub repository](https://github.com/kubeless/kubeless/issues) or contact us through [slack](http://slack.k8s.io) in the #kubeless channel. ================================================ FILE: docs/debugging.md ================================================ # Debugging Kubeless As a developer you'll probably be interested on the investigation of Kubeless code. 
A possible result of this investigation process is the proposal of a new feature or any other contribution that makes sense. In this context, debugging tools arise as a fundamental part of this understanding process. This document will describe the process that developers must execute in order to be able to debug Kubeless code. ## 1. Delve Delve is the component that allows you to debug Go code. This way, the first thing you need to do is install the solution on your computer. You can find the procedure to install and configure Delve on your PC (Linux, Mac and Windows) following [this link](https://github.com/derekparker/delve/tree/master/Documentation/installation). ### Important Note Some versions of Mac OS have been facing some troubles related to the injection of the auto-generated digital certificate required by the Delve installation via the Homebrew process. The error looks like the one presented below. ```console ==> Tapping go-delve/delve Cloning into '/usr/local/Homebrew/Library/Taps/go-delve/homebrew-delve'... remote: Counting objects: 7, done. remote: Compressing objects: 100% (6/6), done. remote: Total 7 (delta 0), reused 5 (delta 0), pack-reused 0 Unpacking objects: 100% (7/7), done. Tapped 1 formula (33 files, 41.4KB) ==> Installing delve from go-delve/delve ==> Using the sandbox ==> Downloading https://github.com/derekparker/delve/archive/v1.0.0-rc.1.tar.gz ==> Downloading from https://codeload.github.com/derekparker/delve/tar.gz/v1.0.0-rc.1 ######################################################################## 100.0% security: SecKeychainSearchCopyNext: The specified item could not be found in the keychain.
==> Generating dlv-cert ==> openssl req -new -newkey rsa:2048 -x509 -days 3650 -nodes -config dlv-cert.cfg -extensions codesign_reqext -batch -out dlv-cert.cer -keyout dlv-cert.key ==> [SUDO] Installing dlv-cert as root ==> sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain dlv-cert.cer Last 15 lines from /Users/gta/Library/Logs/Homebrew/delve/02.sudo: 2017-08-02 17:06:05 +0200 sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain dlv-cert.cer ``` This error commonly occurs because the installer wasn't able (for some reason) to auto-generate the required certificate for the Delve installer. You can manually fix the error by installing the certificate yourself. To do that please follow the steps described below. **Unpack the delve-1.0.0-rc.1 file** ```console $ tar -xzf /Users/{your_user}/Library/Caches/Homebrew/delve-1.0.0-rc.1 ``` **Navigate to Delve/Scripts directory** ```console $ cd /Users/{your_user}/Library/Caches/Homebrew/delve-1.0.0-rc.1/scripts ``` **Execute gencert and provide your admin password** ```console $ ./gencert.sh ``` Done. Now you can try to install Delve again via Homebrew. You'll see that the operation will be completed successfully. ## 2. Configure Visual Studio Code In order to demonstrate the debug process I'll use Visual Studio Code. Visual Studio Code is a lightweight but powerful source code editor which runs on your desktop and is available for Windows, macOS and Linux. It comes with built-in support for JavaScript, TypeScript and Node.js and has a rich ecosystem of extensions for other languages (such as C++, C#, Python, PHP, Go) and runtimes (such as .NET and Unity). To know more about VS Code, follow [this link](https://code.visualstudio.com/docs). Microsoft already did a great job describing the process to configure Delve on top of VS Code. In order to accomplish that, please, follow [this link](https://github.com/Microsoft/vscode-go/wiki/Debugging-Go-code-using-VS-Code). ## 3.
Debugging Kubeless If you were successful with the VS Code debug setup task, you now have a new directory with one file called "launch.json" inside. This file must contain the following content. ```json { "version": "0.2.0", "configurations": [ { "name": "Launch", "type": "go", "request": "launch", "mode": "debug", "remotePath": "", "port": 2345, "host": "127.0.0.1", "program": "${workspaceRoot}", "env": {}, "args": [], "showLog": true } ] } ``` In order to debug Go code, Delve looks for a "main" method, since that is the method that starts the entire execution flow. This way, it could be a good practice to replace the value of the "program" property (currently "`${workspaceRoot}`") with the static path to the "main" file. In this case, the "program" property could be similar to this: ```json "program": "$/Users/{your_user}/Documents/Projects/.../kubeless/cmd/kubeless/" ``` Done. Now the Kubeless code is ready to be debugged. ================================================ FILE: docs/dev-guide.md ================================================ # Kubeless developer guide This will cover the steps that need to be done in order to build your local development environment for Kubeless. ## Setting things up As the Kubeless project is mainly developed in the Go Programming Language, the first thing you should do is guarantee that Go is installed and all environment variables are properly set. In this example we will use Ubuntu Linux 16.04.2 LTS as the target host on where the project will be built.
### Installing Go * Visit [https://golang.org/dl/](https://golang.org/dl/) * Download the most recent Go version (here we used 1.9) and unpack the file * Check the installation process on [https://golang.org/doc/install](https://golang.org/doc/install) * Set the Go environment variables ```bash export GOROOT=/GoDir/go export GOPATH=/GoDir/go/bin export PATH=$GOPATH:$PATH ``` ### Create a working directory for the project ```bash export KUBELESS_WORKING_DIR=$GOROOT/src/github.com/kubeless/ mkdir -p $KUBELESS_WORKING_DIR ``` ### Fork the repository 1. Visit the repo: [https://github.com/kubeless/kubeless](https://github.com/kubeless/kubeless) 1. Click `Fork` button (top right) to establish a cloud-based fork. ### Clone from your fork ```bash cd $KUBELESS_WORKING_DIR git clone https://github.com/ cd $KUBELESS_WORKING_DIR/kubeless git remote add upstream https://github.com/kubeless/kubeless.git # Never push to upstream master git remote set-url --push upstream no_push # Checking your remote set correctly git remote -v ``` ### Bootstrapping your local dev environment To get all the needed tools to build and test, run: ```bash cd $KUBELESS_WORKING_DIR/kubeless make bootstrap ``` Or if you want to use a containerized environment you can use [minikube](https://github.com/kubernetes/minikube). If you already have minikube use the following script to set it up: ```bash cd $KUBELESS_WORKING_DIR/kubeless ./script/start-test-environment.sh ``` This will start a new minikube virtual machine and will open a bash shell in which you can build any local binary or execute the tests. Note that the Kubeless code will be mounted from outside so you can still edit your files with your favourite text editor. ### Building local binaries To make the binaries for your platform, run: ```bash cd $KUBELESS_WORKING_DIR/kubeless make binary make function-controller ``` This will instruct "make" to run the scripts to build the kubeless client and the kubeless controller image. 
You can build kubeless for multiple platforms with: ```bash make binary-cross ``` The binaries accordingly located at `bundles/kubeless_$OS_$arch` folder. ### Building Trigger Controllers Each Kubeless trigger controller is being developed on its own repository. You can find more information about those controllers in their repositories: - [HTTP Trigger](https://github.com/kubeless/http-trigger) - [CronJob Trigger](https://github.com/kubeless/cronjob-trigger) - [Kafka Trigger](https://github.com/kubeless/kafka-trigger) - [NATS Trigger](https://github.com/kubeless/nats-trigger) - [AWS Kinesis Trigger](https://github.com/kubeless/kinesis-trigger) ### Building k8s manifests file To regenerate the most updated k8s manifests file, run: > Note that you will need the [`kubecfg`](https://github.com/ksonnet/kubecfg/releases/) in your `PATH` in order to generate the Kubeless manifests. ```bash cd $KUBELESS_WORKING_DIR export KUBECFG_JPATH=$PWD/ksonnet-lib git clone --depth=1 https://github.com/ksonnet/ksonnet-lib.git cd $KUBELESS_WORKING_DIR/kubeless make all-yaml ``` If everything is ok, you'll have generated manifests file under the `$KUBELESS_WORKING_DIR` root directory: ``` kubeless-openshift.yaml kubeless-non-rbac.yaml kubeless.yaml ``` You can also generate them separated using the following commands: ```bash make kubeless-openshift.yaml make kubeless-non-rbac.yaml make kubeless.yaml ``` ### Uploading your kubeless image to Docker Hub Usually you will need to upload your controller image to a repository so you can make it available for your Kubernetes cluster, whenever it is running. To do so, run the commands: ```bash docker login -u= -e= docker tag kubeless-controller-manager /kubeless-test:latest docker push /kubeless-test:latest ``` Make sure your image repository is correctly referenced in the "containers" session on the yaml file. 
```yaml containers: - image: fabriciosanchez/kubeless-test:latest imagePullPolicy: Always name: kubeless-controller serviceAccountName: controller-acct ``` **Hint:** take a look at the `imagePullPolicy` configuration if you are sending images with tags (e. g. "latest") to the Kubernetes cluster. This option controls the image caching mechanism for Kubernetes and you may encounter problems if new images enters the cluster with the same name. They might not be properly pulled for example. In order to upload your kubeless controller image to Kubernetes, you should use kubectl as follows, informing the yaml file with the required descriptions of your deployment. ```bash kubectl create ns kubeless kubectl create -f /kubeless.yaml ``` ### Working on your local branch Branch from it: ```bash git checkout -b myfeature ``` Then start working on your `myfeature` branch. #### Keep your branch in sync ```bash # While on your myfeature branch git fetch upstream git rebase upstream/master ``` #### Commit your changes ```bash git commit ``` Likely you go back and edit/build/test some more then `commit --amend` in a few cycles. #### Push to your origin first ```bash git push origin myfeature ``` ### Updating generated files There are several files that are automatically generated by Kubernetes [code-generator](https://github.com/kubernetes/code-generator) based on the API [specification](https://github.com/kubeless/kubeless/tree/master/pkg/apis/kubeless) in the repository. These include: * Clientset * Listers * Shared informers * Deepcopy functions If you make any changes to API specification, you will need to run `make update` to regenerate clientset, informers, lister and deepcopy functions. ### Testing kubeless with local minikube The simplest way to try kubeless is deploying it with [minikube](https://github.com/kubernetes/minikube) You can start working with the local minikube VM and test your changes building the controller image and running your tests. 
Once you are happy with the result and you are ready to send a pull request you should run the unit and end-to-end tests (to spot possible issues with your changes): ```bash make validation make test make build_and_test ``` Note that for running the end-to-end tests you need to provide a clean profile of minikube (you can create a specific profile for the tests with `minikube profile tests`). Any new feature/bug fix made to the code should be accompanied by a unit or end to end test. ### Create a pull request 1. Visit your fork at [https://github.com/$your_github_username/kubeless](https://github.com/$your_github_username/kubeless). 1. Click the `Compare & pull request` button next to your `myfeature` branch. 1. Make sure you fill up clearly the description, point out the particular issue your PR is mitigating, and ask for code review. ## Scripting build and publishing Example of shell script to setup a local environment, build the kubeless binaries and make it available on kubernetes. ```bash #!/bin/bash # Please set GOROOT and GOPATH appropriately before running! #rm -rf $GOROOT/src/github.com #export GOROOT= #export GOPATH= #export PATH=$GOPATH:$PATH #KUBELESS_WORKING_DIR=$GOPATH/src/github.com/kubeless/ #mkdir -p $KUBELESS_WORKING_DIR #cd $KUBELESS_WORKING_DIR #git clone https://github.com/ #cd $KUBELESS_WORKING_DIR/kubeless #git remote add upstream https://github.com/DXBrazil/kubeless #git remote set-url --push upstream no_push #git remote -v # git checkout #git fetch #make binary #make controller-image #docker login -u= -e= #docker tag kubeless-controller / #docker push / #kubectl delete -f #kubectl delete namespace kubeless #a=Terminating #while [ $a == Terminating ] #do #a=`kubectl get ns | grep Termina | awk '{print $2}'` #sleep 5 #done #kubectl create namespace kubeless #kubectl create -f ``` ## Manage dependencies We use [dep](https://github.com/golang/dep) to vendor the dependencies. Take a quick look at the README to understand how it works. 
Packages that Kubeless relies on are listed at [Gopkg.toml](https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md). Happy hacking! ================================================ FILE: docs/function-controller-configuration.md ================================================ # Controller configurations for Functions ## Using ConfigMap Configurations for functions can be done in `ConfigMap`: `kubeless-config` which is a part of `Kubeless` deployment manifests. Deployments for function can be configured in `data` inside the `ConfigMap`, using key `deployment`, which takes a string in the form of `yaml/json` and is driven by the structure of [v1.Deployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#deployment-v1-apps). Unknown fields or duplicate keys in the provided deployment data will result in an error. E.g. In the below configuration, new **annotations** are added globally to all function deployments and podTemplates and **replicas** for each function pod will be `2`. 
```yaml apiVersion: v1 data: deployment: |- { "metadata": { "annotations":{ "annotation-to-deployment": "value" } }, "spec": { "replicas": 2, "template": { "spec": { "annotations": { "annotations-to-pod": "value" }, "containers": [{ "resources": { "requests": { "cpu": "100m" } } }] } } } } ingress-enabled: "false" service-type: ClusterIP kind: ConfigMap metadata: name: kubeless-config namespace: kubeless ``` The following configuration will result in an error because of duplicate key: ```yaml apiVersion: v1 data: deployment: |- { "metadata": { "annotations":{ "annotation-to-deployment": "value", "annotation-to-deployment": "other value", } } } ingress-enabled: "false" service-type: ClusterIP kind: ConfigMap metadata: name: kubeless-config namespace: kubeless ``` The following configuration will result in an error because of unknown key: ```yaml apiVersion: v1 data: deployment: |- { "unknown": "hack", } ingress-enabled: "false" service-type: ClusterIP kind: ConfigMap metadata: name: kubeless-config namespace: kubeless ``` It is **recommended** to have controlled custom configurations on the following **items** (*but is not limited to just these*): > Warning: You should know what you are doing. 
- v1beta2.Deployment.ObjectMeta.Annotations - v1beta2.Deployment.Spec.replicas - v1beta2.Deployment.Spec.Strategy - v1beta2.Deployment.Spec.Template.ObjectMeta.Annotations - v1beta2.Deployment.Spec.Template.Spec.NodeSelector - v1beta2.Deployment.Spec.Template.Spec.NodeName Having said all that, if one wants to override configurations from the `ConfigMap` then in `Function` manifest one needs to provide the details as follows: ```yaml apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: testfunc spec: deployment: ### Definition as per v1beta2.Deployment metadata: annotations: "annotation-to-deploy": "final-value-in-deployment" spec: replicas: 2 ### Final deployment gets Replicas as 2 template: metadata: annotations: "annotation-to-pod": "value" deps: "" function: | module.exports = { foo: function (req, res) { res.end('hello world updated!!!') } } function-content-type: text handler: hello.foo runtime: nodejs8 service: ports: - name: http-function-port port: 8080 protocol: TCP targetPort: 8080 type: ClusterIP ``` ## Install kubeless in different namespace If you have installed kubeless into some other namespace (which is not called `kubeless`) or changed the name of the config file from kubeless-config to something else, then you have to export the kubeless namespace and the name of kubeless config as environment variables before using kubless cli. This can be done as follows: ```bash $ export KUBELESS_NAMESPACE= $ export KUBELESS_CONFIG= ``` or the following information can be added to `functions.kubeless.io` `CustomResourceDefinition` as `annotations`. E.g. 
below `CustomResourceDefinition` will signify `kubeless-controller` is installed in namespace `kubless-new-namespace` and config name is `kubeless-config-new-name` ```yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: functions.kubeless.io annotations: kubeless.io/namespace: kubless-new-namespace kubeless.io/config: kubeless-config-new-name spec: group: kubeless.io names: kind: Function plural: functions singular: function scope: Namespaced version: v1beta1 ``` The priority of deciding the `namespace` and `config name` (highest to lowest) is: - Environment variables - Annotations in `functions.kubeless.io` CRD - default: `namespace` is `kubeless` and `ConfigMap` is `kubeless-config` ### Install several instances of kubeless (multi-tenancy) It is possible to install Kubeless in several namespaces. This allow administrators to have several instances of Kubeless that can be configured differently (for example using different runtime images or with different Docker credentials). In order to install Kubeless in a custom namespace (or in several ones) it's necessary to: - Install the `CustomResourceDefinitions` and `ClusterRoles` as in the default scenario. These resources are not namespaced which means that you need to install them just once. It is also recommendable to split the current rules of the `ClusterRole` into two different roles: one just for accessing cluster-wide resources like `CustomResourceDefinitions` and a second one with the rest of resources. That way it's possible to attach the first `ClusterRole` to a `ClusterRoleBinding` as the default scenario but attaching the second one with a namespaced `RoleBinding` to avoid unauthorized access to other namespaces. More information about `RBAC` [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). - The rest of the resources you can find in the installation manifest (`Deployment`, `ConfigMap`, `ServiceAccount`...) are namespaced. 
This means that it's required to modify the `metadata.namespace` of each one of those to target the correct namespace. - The next step is to set in the Kubeless ConfigMap the namespace in which the controller should listen for functions. This is set in the variable `functions-namespace`. If this value is empty it will try to find functions in all namespaces. This is an example of a manifest (simplified) for a Kubeless instance deployed in the namespace "test": ```yaml # RBAC Configuration apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: kubeless-controller-read rules: - apiGroups: - apiextensions.k8s.io resources: - customresourcedefinitions verbs: - get - list --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: kubeless-controller-deployer rules: - apiGroups: - "" resources: - services - configmaps verbs: ... # The rest of the ClusterRole has been omitted --- apiVersion: v1 kind: ServiceAccount metadata: name: controller-acct namespace: test --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: kubeless-controller-read-test roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kubeless-controller-read subjects: - kind: ServiceAccount name: controller-acct namespace: test --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: RoleBinding metadata: name: kubeless-controller-deployer namespace: test roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kubeless-controller-deployer subjects: - kind: ServiceAccount name: controller-acct # Kubeless Configuration --- apiVersion: v1 kind: ConfigMap metadata: name: kubeless-config namespace: test data: functions-namespace: "test" ... # The rest of the ConfigMap data has been omitted # Kubeless core controller --- apiVersion: apps/v1beta1 kind: Deployment metadata: labels: kubeless: controller name: kubeless-controller-manager namespace: test spec: ... 
# The rest of the Deployment has been omitted ``` The same process should be followed for any trigger controller installed (Kafka, Nats, ...): Adapt the RBAC configuration and change the resources namespace. These controllers will read the `functions-namespace` property from the main ConfigMap. ## Using custom images It is possible to configure the different images that Kubeless uses to deploy and execute functions. In this ConfigMap you can configure: - Different or additional runtimes. For doing so it is possible to modify/add a runtime in the field `runtime-images`. Runtimes are categorized by major version. See the guide for [implementing a new runtime](/docs/implementing-new-runtime) for more information. Each major version has: - Name: Unique ID of the runtime. It should contain the runtime name and version. - Version: Major and minor version of the runtime. - Runtime Image: Image used to execute the function. - Init Image: Image used for installing the function and/or dependencies. - (Optional) Image Pull Secrets: Secret required to pull the image in case the repository is private. - (Optional) Environment variables. - (Optional) Secrets: Shared with the container as volumes mounted at `/var/run/secrets/kubeless.io/`. - The image used to populate the base image with the function. This is called `provision-image`. This image should have at least `unzip`, `GNU tar`, `gzip`, `bzip2`, `xz` and `curl`. It is also possible to specify `provision-image-secret` to specify a secret to pull that image from a private registry. - The image used to build function images. This is called `builder-image`. This image is optional since its usage can be disabled with the property `enable-build-step`. A Dockerfile to build this image can be found [here](https://github.com/kubeless/kubeless/tree/master/docker/function-image-builder). It is also possible to specify `builder-image-secret` to specify a secret to pull that image from a private registry. 
## Authenticate Kubeless Function Controller using OAuth Bearer Token In some non-RBAC k8s deployments using webhook authorization, service accounts may have insufficient privileges to perform all k8s operations that the Kubeless Function Controller requires for interacting with the cluster. It's possible to override the default behavior of the Kubeless Function Controller using a k8s serviceaccount for authentication with the cluster and instead use a provided OAuth Bearer token for all k8s operations. This can be done by creating a k8s secret and mounting that secret as a volume on controller pods, then setting the environmental variable `KUBELESS_TOKEN_FILE_PATH` to the filepath of that secret. Be sure to set this environmental variable on the controller template spec or to every pod created in the deployment. For example, if the bearer token is mounted at /mnt/secrets/bearer-token, this k8s spec can use it: ```yaml # Kubeless core controller --- apiVersion: apps/v1beta1 kind: Deployment metadata: name: kubeless-controller-manager namespace: kubeless labels: kubeless: controller spec: template: metadata: labels: kubeless: controller spec: containers: - env: - name: KUBELESS_TOKEN_FILE_PATH value: /mnt/secrets/bearer-token ... # The rest of the Deployment has been omitted ``` ================================================ FILE: docs/http-triggers.md ================================================ # Expose and secure Kubeless functions Kubeless leverages [Kubernetes ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to provide routing for functions. By default, a deployed function will be matched to a Kubernetes service using ClusterIP as the service. That means that the function is not exposed publicly. Because of that, we provide the `kubeless trigger http` command that can make a function publicly available. This guide provides a quick sample on how to do it. 
## Ingress controller In order to create routes for functions in Kubeless, you must have an Ingress controller running. There are several options to deploy it. In this document we point to several different solutions that you can choose: > Note: In case Kubeless is running in a GKE cluster you will need to disable the default Ingress controller provided by GKE. The native controller doesn't work with services that have a type different than NodePort (see [this issue](https://github.com/kubernetes/ingress-nginx/issues/1417)). In order to expose a Kubeless function, disable the default controller and deploy one of the options described below. ### Minikube Ingress addon If your cluster is running in Minikube you can enable the Ingress controller just executing: ```console minikube addons enable ingress ``` After a couple of minutes you should be able to see the controller running in the `kube-system` namespace: ```console $ kubectl get pod -n kube-system -l app=nginx-ingress-controller NAME READY STATUS RESTARTS AGE nginx-ingress-controller-pj2pz 1/1 Running 0 25s ``` ### Nginx Ingress You can deploy a Nginx Ingress controller manually (it is the same controller than in the Minikube addon) following the instructions that can be found [here](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/README.md). ### Kong Ingress [Kong](https://getkong.org) have an Ingress controller that can be used to expose functions and secure them. You can check the deployment instructions in [their repository](https://github.com/Kong/kubernetes-ingress-controller/tree/master/docs/deployment). Once Kong is deployed you should be able to see the controller in the `kong` namespace: ```console kubectl get pods -n kong NAME READY STATUS RESTARTS AGE kong-56c4cc55c9-78srh 1/1 Running 0 1h kong-ingress-controller-79f48dd4d7-ql4vw 2/2 Running 0 1h postgres-0 1/1 Running 1 22h ``` ### Traefik Ingress [Traefik](http://traefik.io) provides an Ingress controller as well. 
To deploy it follow the steps described at [this guide](https://docs.traefik.io/user-guide/kubernetes/). As a result, you will be able to see the traefik controller running in the `kube-system` namespace: ```console kubectl get pod -n kube-system -l name=traefik-ingress-lb NAME READY STATUS RESTARTS AGE traefik-ingress-controller-57b4767f99-g42n2 1/1 Running 0 1m ``` ## Deploy function with Kubeless CLI Once you have a Ingress Controller running you should be able to start deploying functions and expose them publicly. First deploy a function: ```console $ cd examples $ kubeless function deploy get-python \ --runtime python2.7 \ --handler helloget.foo \ --from-file python/helloget.py $ kubectl get po NAME READY STATUS RESTARTS AGE get-python-1796153810-krrf3 1/1 Running 0 2s $ kubectl get svc NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE get-python 10.0.0.26 8080/TCP 44s ``` ## Expose a function In order to expose a function, it is necessary to create a HTTP Trigger object. The Kubeless CLI provides the commands required to do so: ```console $ kubeless trigger http create --help Create a http trigger Usage: kubeless trigger http create FLAG [flags] Flags: --basic-auth-secret string Specify an existing secret name for basic authentication --cors-enable If true then cors will be enabled on Http Trigger --enableTLSAcme If true, routing rule will be configured for use with kube-lego --function-name string Name of the function to be associated with trigger --gateway string Specify a valid gateway for the Ingress. 
Supported: nginx, traefik, kong (default "nginx") -h, --help help for create --hostname string Specify a valid hostname for the function --namespace string Specify namespace for the HTTP trigger --path string Ingress path for the function --tls-secret string Specify an existing secret that contains a TLS private key and certificate to secure ingress ``` We will create a http trigger to `get-python` function: ```console $ kubeless trigger http create get-python --function-name get-python ``` This command will create an ingress object. We can see it with kubectl (this guide is run on minikube): ```console $ kubectl get ing NAME HOSTS ADDRESS PORTS AGE get-python get-python.192.168.99.100.nip.io 192.168.99.100 80 59s ``` Kubeless creates a default hostname in form of ..nip.io. Alternatively, you can provide a real hostname with `--hostname` flag or use a different `--path` like this: ```console $ kubeless trigger http create get-python --function-name get-python --path echo --hostname example.com $ kubectl get ing NAME HOSTS ADDRESS PORTS AGE get-python example.com 80 6s ``` But you have to make sure your hostname is configured properly. 
You can test the created HTTP trigger with the following command: ```console $ curl --data '{"Another": "Echo"}' \ --header "Host: get-python.192.168.99.100.nip.io" \ --header "Content-Type:application/json" \ 192.168.99.100/echo {"Another": "Echo"} ``` ## Enable TLS Once you have one of the supported Ingress Controllers it is possible to enable TLS using a certificate: - Automatically generated using Let's Encrypt and [cert-manager](https://github.com/jetstack/cert-manager) - Self signed - Provided by a certificate issuer ### Using Let’s Encrypt’s CA Once you have Kube-lego running, you can deploy a function and create an HTTP trigger with the `--enableTLSAcme` flag enabled as below: ```console $ kubeless trigger http create get-python --function-name get-python --path get-python --enableTLSAcme ``` Running the above command, Kubeless will automatically create an ingress object with the annotation `kubernetes.io/tls-acme: 'true'` set, which will be used by Kube-lego to configure the service certificate. ### Create a self-signed certificate If you don't have a working certificate it is possible to generate a dummy one to be able to use TLS with your functions. 
To generate the certificate and its secret execute the following: ```console $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=foo.bar.com" Generating a 2048 bit RSA private key ..........................................................................+++ .......................................................+++ writing new private key to 'tls.key' ----- $ kubectl create secret tls tls-secret --key tls.key --cert tls.crt secret "tls-secret" created ``` ### Use an existing certificate Now that you have a certificate, you can use it to set up TLS for the HTTP trigger, thereby securing functions: ```console $ kubeless trigger http create get-python --function-name get-python --hostname foo.bar.com --tls-secret secret-name ``` Once the Ingress rule has been deployed you can verify that the function is accessible through HTTPS: ```console $ kubectl get ingress NAME HOSTS ADDRESS PORTS AGE get-python foo.bar.com 192.168.99.100 80, 443 4m $ curl -k https://192.168.99.100 --header 'Host: foo.bar.com' hello world ``` ## Enable Basic Authentication Once you have one of the supported Ingress Controllers it is possible to enable Basic Authentication either: - Creating a secret with the content of the user to authenticate. This is valid for the Nginx and Traefik controllers. - Adding the Kong plugin for basic authentication. ### Enable Basic Authentication with Nginx or Traefik For enabling authentication for a function, the first step is to create a secret with the user and password: ```console $ htpasswd -cb auth foo bar Adding password for user foo $ kubectl create secret generic basic-auth --from-file=auth secret "basic-auth" created ``` Now you just need to create an HTTP trigger using that secret. ```console $ kubeless trigger http create get-python --function-name get-python --basic-auth-secret basic-auth --gateway nginx INFO[0000] HTTP trigger get-python created in namespace default successfully! 
``` > Note: The command is the same for the case of Traefik, just use `--gateway traefik` instead Once the Ingress rule has been deployed you can verify that the function is accessible just for the proper user and password: ```console $ kubectl get ingress NAME HOSTS ADDRESS PORTS AGE get-python get-python.192.168.99.100.nip.io 192.168.99.100 80 1m $ curl --header 'Host: get-python.192.168.99.100.nip.io' 192.168.99.100 401 Authorization Required

401 Authorization Required


nginx/1.13.7
$ curl -u foo:bar --header 'Host: get-python.192.168.99.100.nip.io' 192.168.99.100 hello world ``` ### Enable Basic Authentication with Kong It is not yet supported to create an HTTP trigger with basic authentication using Kong as backend but the steps to do it manually are pretty simple. It is possible to do so using Kong plugins. In the [next section](#enable-kong-security-plugins) we explain how to enable any of the available Kong plugins and in particular we explain how to enable the basic-auth plugin. ## Enable CORS It's possible to enable CORS requests at the HTTPTrigger level. To do so use the --cors-enable flag when deploying the HTTPTrigger or add the field cors-enable: true to the YAML manifest. ## Add arbitrary annotations It is also possible to add any annotation to the resulting Ingress object if you add those to the HTTPTrigger. For example: ``` apiVersion: kubeless.io/v1beta1 kind: HTTPTrigger metadata: name: cors-trigger annotations: nginx.ingress.kubernetes.io/enable-cors: "true" nginx.ingress.kubernetes.io/cors-allow-methods: "GET" spec: function-name: get-python host-name: example.com path: echo ``` The above will create an Ingress object with the annotations nginx.ingress.kubernetes.io/enable-cors: "true" and nginx.ingress.kubernetes.io/cors-allow-methods: "GET". ## Enable Kong Security plugins Kong has available several free [plugins](https://konghq.com/plugins/) that can be used along with the Kong Ingress controller for securing the access to Kubeless functions. In particular, the list of security plugins that can be used is: - Basic Authentication - Key Authentication - OAuth 2.0 - JWT - ACL - HMAC Authentication - LDAP Authentication Once you have Kong and its Ingress controller running in your cluster the generic steps to use any plugin are: - Deploy a basic HTTP trigger for the target function using `--gateway kong`. - Create a Kubernetes object for the plugin you want to use. - Add a Kong Consumer. 
- Create the specific credentials or follow any additional steps that the plugin may require. - Associate the credentials/plugin with the Ingress object created in the first step. The specific steps that are required to use a plugin can be found in the [plugins](https://konghq.com/plugins/) page. As an example we will configure the plugin [basic-auth](https://getkong.org/plugins/basic-authentication/) for our function `get-python`. ### Deploy a basic HTTP trigger First we need to create a HTTP trigger to generate the Ingress object that will expose our function. ```console $ kubeless trigger http create get-python --function-name get-python --gateway kong --hostname foo.bar.com INFO[0000] HTTP trigger get-python created in namespace default successfully! ``` ### Add the basic-auth plugin The next step is creating the Custom Resource related to the Kong basic authentication plugin. You can see the possible configuration options available in the [plugin documentation](https://getkong.org/plugins/basic-authentication). ```console $ echo " apiVersion: configuration.konghq.com/v1 kind: KongPlugin metadata: name: basic-auth consumerRef: basic-auth config: hide_credentials: false " | kubectl create -f - kongplugin "basic-auth" created ``` #### Create a Consumer Now we need a [`Consumer`](https://getkong.org/docs/0.13.x/getting-started/adding-consumers/#adding-consumers) for the plugin. 
```console $ echo " apiVersion: configuration.konghq.com/v1 kind: KongConsumer metadata: name: basic-auth username: user " | kubectl create -f - kongconsumer "basic-auth" created ``` #### Create user credentials Now that we have a consumer we need to create the basic authentication credentials that the function is going to use: ```console $ echo " apiVersion: configuration.konghq.com/v1 kind: KongCredential metadata: name: basic-auth consumerRef: basic-auth type: basic-auth config: username: user password: pass " | kubectl create -f - kongcredential "basic-auth" created ``` #### Associate the credentials with the Ingress object The final step is to enable the credentials and the plugin for the function. For doing so we just need to add an `Annotation` in the Ingress object that we generated in the first step: ```console $ kubectl patch ingress get-python \ -p '{"metadata":{"annotations":{"basic-auth.plugin.konghq.com":"basic-auth"}}}' ingress "get-python" patched ``` Now that the plugin has been enabled we can verify that it is working: ```console $ export PROXY_IP=$(minikube service -n kong kong-proxy --url --format "{{ .IP }}" | head -1) $ export HTTP_PORT=$(minikube service -n kong kong-proxy --url --format "{{ .Port }}" | head -1) $ curl --header "Host: foo.bar.com" ${PROXY_IP}:${HTTP_PORT} {"message":"Unauthorized"} $ curl -u user:pass --header "Host: foo.bar.com" ${PROXY_IP}:${HTTP_PORT} hello world ``` ================================================ FILE: docs/implementing-new-runtime.md ================================================ # How to implement a new Kubeless run time Runtimes are developed in this repository: [https://github.com/kubeless/runtimes](https://github.com/kubeless/runtimes) To implement a new runtime or improve the existing ones check the [Contributing](https://github.com/kubeless/runtimes/blob/master/CONTRIBUTING.md) and [Developer](https://github.com/kubeless/runtimes/blob/master/DEVELOPER_GUIDE.md) guides. 
================================================ FILE: docs/implementing-new-trigger.md ================================================ # How to add a new event source as Trigger Kubeless [architecture](/docs/architecture) is built on core concepts of Functions, Triggers and Runtime. A _Trigger_ in Kubeless represents association between an event source and functions that need to be invoked on an event in the event source. Kubeless fully leverages the Kubernetes concepts of [custom resource definition](https://kubernetes.io/docs/concepts/api-extension/custom-resources/)(CRD) and [custom controllers](https://kubernetes.io/docs/concepts/api-extension/custom-resources/#custom-controllers). Each trigger is expected to be modelled as Kubernetes CRD. A trigger specific custom resource controller is expected to be written that realizes how deployed functions are invoked when event occurs. Following sections document how one can add a new event source as _Trigger_ into Kubeless. ## Triggers development repository Each Kubeless trigger controller is being developed on its own repository. You can find more information about those controllers in their repositories. If you want to create a new trigger you will need to create a new repository for that. These are the triggers currently available that can be used as templates for new ones: - [HTTP Trigger](https://github.com/kubeless/http-trigger) - [CronJob Trigger](https://github.com/kubeless/cronjob-trigger) - [Kafka Trigger](https://github.com/kubeless/kafka-trigger) - [NATS Trigger](https://github.com/kubeless/nats-trigger) ## Model event source as CRD First step is to create a new CRD for the event source. CRD for the new triggers will be largely similar to the existing ones. 
For example below is the CRD for Kafka trigger ```yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: kafkatriggers.kubeless.io spec: group: kubeless.io names: kind: KafkaTrigger plural: kafkatriggers singular: kafkatrigger scope: Namespaced version: v1beta1 ``` Give an appropriate and intuitive name to the event source. ## Model the CRD spec Once CRD is defined, you need to model the event source and its attributes as resource object spec. Please see [API conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md) for key attributes of Kubernetes API resource object. Except for the `Spec` part, the rest of the parts needed to define a Trigger are pretty similar to other Triggers. For example, below is the definition of [Kafka Trigger](https://github.com/kubeless/kafka-trigger/blob/master/pkg/apis/kubeless/v1beta1/kafka_trigger.go) ```go type KafkaTrigger struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` Spec KafkaTriggerSpec `json:"spec"` } ``` You need to model the event source attributes into the `Spec` attribute of the new trigger. Depending on the nature of the event source you may want to associate a single function or multiple functions with the event source. Use an appropriate mechanism to represent the association. For example, the Kafka trigger uses a Kubernetes label selector to associate any function with a matching label with the event source. ## Code Generation Once you have defined the new trigger, please ensure it's placed in the `pkg/apis/kubeless/v1beta1/` path, and update `register.go` to include the new trigger type. Now you can auto-generate the clientset, lister and informers for the new API resource object as well by running `make update` or `./hack/update-codegen.sh` within the trigger repository. The auto-generated clientset, lister and informers come in handy in writing the controller in the next step. ## CRD controller Here is the most important step, i.e. 
writing the controller itself. As far as the skeleton of the controller goes, it would be pretty similar to existing controllers like the Kafka trigger controller, nats trigger controller or http trigger controller. Functionally the controller does two important things - watch the Kubernetes API server for CRUD operations on the new trigger object and take appropriate actions. - when an event occurs in the event source, trigger the associated functions. Please read the code and logic for the existing [Kafka controller](https://github.com/kubeless/kafka-trigger/tree/master/pkg/controller) as a reference. ## Building controller binary and docker image Ensure your controller is an independent binary that can be built from the Makefile. Please follow one of the existing controller [cmd](https://github.com/kubeless/kafka-trigger/tree/master/cmd) as a reference. Also ensure there is a corresponding `Dockerfile` to build the controller image. Please see the dockerfile for other trigger controllers as a [reference](https://github.com/kubeless/kafka-trigger/tree/master/docker). Add appropriate Makefile targets so that the controller binary and docker image can be built. ## Manifest Create a jsonnet file for the new trigger and ensure that the generated yaml file has the CRD definition, a deployment for the trigger controller and the necessary RBAC rules. Again most of the stuff is common with the Kafka or HTTP triggers, so take existing jsonnet manifests as a reference. ## CI Once the new trigger is working it's important to add tests to ensure and preserve the trigger functionality. Each trigger should contain: - Unit tests covering the basic functionality. - End-to-end tests that ensure the compatibility with the latest image of the Kubeless core. The CI used to run the tests is CircleCI. You can check examples of how CircleCI is configured [here](https://github.com/kubeless/kafka-trigger/blob/master/.circleci/config.yml). This file should define at least 4 jobs: - One for building the binaries and manifests. 
- Another one to test the functionality end-to-end in a Minikube scenario. - A third one to push the image used in the tests as `latest`. - A final one to auto generate a release in Github in case it's building a new tag. Most of the functionality for the above depends on scripts that have been already developed so you just need to change some data and names from the YAML to make it work. The tests to run are defined in the folder `tests/` of each repository. These are [`bats`](https://github.com/sstephenson/bats) tests that load a common library (`script/libtest.bash`) and execute some simple scenarios. Again you can take Kafka as an example for some useful scenarios to test. ================================================ FILE: docs/kubeless-functions.md ================================================ # Kubeless Functions Functions are the main entity in Kubeless. It is possible to write Functions in different languages but all of them share common properties like the generic interface, the default timeout or the runtime UID. In this document we are going to explain some of these common properties and the different runtimes available in Kubeless. You can find in depth details about the Function specification [here](/docs/advanced-function-deployment). ## Functions Interface Every function receives two arguments: `event` and `context`. The first argument contains information about the source of the event that the function has received. The second contains general information about the function like its name or maximum timeout. This is a representation in YAML of a Kafka event: ```yaml event: data: # Event data foo: "bar" # The data is parsed as JSON when required event-id: "2ebb072eb24264f55b3fff" # Event ID event-type: "application/json" # Event content type event-time: "2009-11-10 23:00:00 +0000 UTC" # Timestamp of the event source event-namespace: "kafkatriggers.kubeless.io" # Event emitter extensions: # Optional parameters request: ... 
# Reference to the request received response: ... # Reference to the response to send # (specific properties will depend on the function language) context: function-name: "pubsub-nodejs" timeout: "180" runtime: "nodejs6" memory-limit: "128M" ``` Functions should return a string that will be used as the HTTP response for the caller. Some runtimes may support different types (like objects) for the returned values. You can check basic examples of every language supported in the [examples](https://github.com/kubeless/kubeless/tree/master/examples) folder. ## Functions Timeout Runtimes have a maximum timeout set by the environment variable FUNC_TIMEOUT. This environment variable can be set using the CLI option `--timeout`. The default value is 180 seconds. If a function takes longer than that to execute, the process will be terminated. ## Runtime User As a [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) functions are configured to run with an unprivileged user (UID 1000) by default (except for OpenShift where the UID is automatically set). This prevents functions from having root privileges. This default behaviour can be overridden specifying a different Security Context in the `Deployment` template that is part of the Function Spec. ## Scheduled functions It is possible to deploy functions that should be triggered following a certain schedule. For specifying the execution frequency we use the [Cron](https://en.wikipedia.org/wiki/Cron) format. Every time a scheduled function is executed, a [Job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) is started. This Job will do an HTTP GET request to the function service and will be successful as long as the function returns 200 OK. 
For executing scheduled functions we use Kubernetes [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) using mostly the default options which means: - If a Job fails, it won't be restarted but it will be retried in the next scheduled event. The maximum time that a Job will exist is specified with the function timeout (180 seconds by default). - The concurrency policy is set to `Allow` so concurrent jobs may exist. - The history limit is set to maintain at most three successful jobs (and one failed). If for some reason you want to modify one of the default values for a certain function you can execute `kubectl edit cronjob trigger-<func_name>` (where `<func_name>` is the name of your function) and modify the fields required. Once it is saved the CronJob will be updated. ## Monitoring functions Some Kubeless runtimes expose metrics at the `/metrics` endpoint and these metrics will be collected by Prometheus. We also include a prometheus setup in [`manifests/monitoring`](https://github.com/kubeless/kubeless/blob/master/manifests/monitoring/prometheus.yaml) to help you set it up more easily. The metrics collected are: Number of calls, succeeded and error executions and the time spent per call. ## Runtime variants Check [this document](/docs/runtimes) to get more details about supported runtimes and languages. ================================================ FILE: docs/kubeless-on-AKS.md ================================================ # Kubeless on Azure Kubernetes Service ## 1. Introduction This guide goes over the required steps for deploying Kubeless in Azure AKS (Azure Kubernetes Service). The steps in this guide require you to install: - [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) (`az`): This CLI will be used to create the cluster in AKS. - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/): Used for installing Kubeless. ## 2. 
Creating an AKS cluster In order to get Kubeless up and running on top of AKS of course you'll need an AKS cluster. Fortunately, Microsoft already did a great job documenting the entire process to accomplish that. You can reach that documentation by following [this link](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough#create-aks-cluster). ### Important notes regarding the cluster creation itself * In the same document the property `--generate-ssh-keys` was used to generate the required SSH keys for the cluster deployment. If you would like to create your own keys, please use `--ssh-key-value` passing the path to your SSH pub file. ## 3. Installing "Kubeless-Controller" Assuming that the Kubernetes cluster is up and running on top of AKS, it's time to install Kubeless. To accomplish that, please follow the steps described in the Kubeless [Quick-Start Guide](/docs/quick-start). > NOTE: For [Azure AD enabled AKS clusters](https://docs.microsoft.com/en-us/azure/aks/aad-integration), support for the `kubeless` CLI to authenticate against Azure AD is only available in versions greater than `v1.0.1`. 
================================================ FILE: docs/misc/kafka-pv-gke.yaml ================================================ apiVersion: v1 kind: PersistentVolume metadata: name: kafka-pv labels: kubeless: kafka spec: capacity: storage: 1Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Retain gcePersistentDisk: pdName: kubeless-kafka fsType: ext4 ================================================ FILE: docs/misc/kubeless-grafana-dashboard.json ================================================ { "id": 1, "title": "Kubeless", "description": "Dashboard for Kubeless", "tags": [], "style": "dark", "timezone": "browser", "editable": true, "hideControls": false, "sharedCrosshair": false, "rows": [ { "collapse": false, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "prometheus", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 1, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 6, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum( rate(function_calls_total[5m])) by (function)", "interval": "", "intervalFactor": 2, "legendFormat": "function={{function}}", "metric": "function_calls_total", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Function call rate", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, 
"min": null, "show": true } ] }, { "aliasColors": {}, "bars": false, "datasource": "prometheus", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 2, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 6, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum( rate(function_failures_total[5m])) by (function)", "interval": "", "intervalFactor": 2, "legendFormat": "function={{function}}", "metric": "function_failures_total", "refId": "A", "step": 10 } ], "timeFrom": null, "timeShift": null, "title": "Function failure rate", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ] } ], "title": "Row" }, { "title": "New row", "height": "250px", "editable": true, "collapse": false, "panels": [ { "title": "Execution duration", "error": false, "span": 12, "editable": true, "type": "graph", "isNew": true, "id": 3, "targets": [ { "refId": "A", "expr": "sum(rate(function_duration_seconds_sum[1m])) by (function)", "intervalFactor": 2, "metric": "function_duration_seconds_sum", "step": 4 } ], "datasource": "prometheus", "renderer": "flot", "yaxes": [ { "label": null, "show": true, "logBase": 1, "min": null, "max": null, "format": "short" }, { "label": null, "show": true, "logBase": 1, "min": null, "max": null, "format": "short" } ], "xaxis": { "show": true }, "grid": { "threshold1": null, 
"threshold2": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "lines": true, "fill": 1, "linewidth": 2, "points": false, "pointradius": 5, "bars": false, "stack": false, "percentage": false, "legend": { "show": true, "values": false, "min": false, "max": false, "current": false, "total": false, "avg": false }, "nullPointMode": "connected", "steppedLine": false, "tooltip": { "value_type": "cumulative", "shared": true, "sort": 0, "msResolution": true }, "timeFrom": null, "timeShift": null, "aliasColors": {}, "seriesOverrides": [], "links": [] } ] } ], "time": { "from": "2017-11-23T05:29:50.547Z", "to": "2017-11-23T06:51:57.387Z" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "templating": { "list": [] }, "annotations": { "list": [] }, "refresh": false, "schemaVersion": 12, "version": 5, "links": [], "gnetId": null } ================================================ FILE: docs/misc/zookeeper-pv-gke.yaml ================================================ apiVersion: v1 kind: PersistentVolume metadata: name: zookeeper-pv labels: kubeless: zookeeper spec: capacity: storage: 1Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Retain gcePersistentDisk: pdName: kubeless-zookeeper fsType: ext4 ================================================ FILE: docs/monitoring.md ================================================ # Monitoring ## Prometheus Kubeless monitoring relies on Prometheus. The language runtimes are instrumented to automatically collect metrics for each function. Prometheus will scrape those metrics and display them in the default Prometheus dashboard. ## Grafana You could also use Grafana to visualize the prometheus metrics exposed by Kubeless. 
Example of a Grafana dashboard for Kubeless showing function call rate, function failure rate and execution duration: ![Grafana](./img/kubeless-grafana-dashboard.png) Sample dashboard JSON file available [here](./misc/kubeless-grafana-dashboard.json) ================================================ FILE: docs/proposals/decoupling-triggers-and-runtimes.md ================================================ # Decoupling triggers and runtimes ## Definition of the problem Currently for each new runtime we need to add a container image per trigger. We should design a runtime abstraction. So that: - Triggers can be added (http or event or something else). - These can be in a single language i.e golang - Runtimes can be added more easily - One function can be triggered by more than one trigger source - One trigger can execute more than one function We need to define interface between trigger container and runtime. What type of protocol to use to pass the request and response. ### **Warning** Changing the interface between triggers (currently embed in the runtime container) and functions will cause a breaking change. Functions working with previous versions of Kubeless may not work depending on the format of the interface chosen. ## User POV From the users point of view we would support: ```bash # As today, deploy runtime + trigger kubeless function deploy func --trigger-http [...] # Deploy the runtime without a trigger kubeless function deploy func [...] # Add a trigger linking it to a function kubeless trigger add http --path /func func kubeless trigger add kafka_topic --topic s3 func ``` Note that splitting the trigger type in different "verbs" allow us to easily have flags per trigger type. Disclaimer: we would need to define possible flags for each trigger ## Trigger CRD For enabling the above, we propose to create a new Custom Resource Definition (CRD) for triggers. 
This CRD will contain the fields required by its _trigger controller_ to create the resulting actionable items (like an Ingress rule for HTTP requests or Kafka consumers). Each _trigger_ instance will contain as well the IDs of the functions that the _trigger_ is bound to. ## Suggested architecture approach For the moment, we will assume that the interface protocol between the trigger and the runtime will be HTTP (discussed later). So far we can identify two types of trigger, each one of them will be managed by a _trigger controller_: - HTTP Trigger: This trigger should redirect HTTP(s) requests from/to the _runtime_. - Kafka Topic Trigger: This trigger should translate topic messages to HTTP requests. This way runtimes can have a unique interface, regardless of its trigger. This diagram shows a simplified desired architecture: ![Triggers and runtime relation](./img/triggers-runtime-diagram.png) Regardless of implementation details, the Kubeless Client (or any other client) will create a Custom Resource for the desired trigger type (HTTP or Kafka), this new instance will be detected by the _Controller_ that will obtain the required information and create an _actionable item_ (like an Ingress rule). Whenever a request is made, the _actionable item_ will make an HTTP request to the _runtime container_. This will call the user function with the interface in the [section below](#function-input). Finally, the runtime container will send back the returned value of the function to the caller. This response can be discarded if the function is triggered asynchronously (for example if the trigger is a Kafka message or a scheduled event). We will not enter into the details of the trigger resource definitions or implementations since they will be handled separately following the above architecture. ## Functions interface Right now there is no standard for the interface between functions and triggers. 
The [CNCF document](https://docs.google.com/document/d/1UjW8bt5O8QBgQRILJVKZJej_IuNnxl20AJu9wA8wcdI/) doesn’t get into specifics about how the two pieces should communicate between them or which protocol they should use. Some of the existing solutions are: - AWS Lambda: - Protocol: The interface between functions and trigger are "events". There are several types of events: s3, DynamoDB, custom applications. - Parameters: AWS functions receive different parameters depending on the language but, in general, all the functions receive at least this two: - "event"/"input": This is a blob in which the function receives the information to process. The blob could contain any structure and that will depend on the event source. - "context": General information about the function environment. E.g Invoke ID, function version, function ARN… - OpenWhisk: - Protocol: A Kafka service transforms HTTP Requests to Kafka messages in any case. - Parameters: Functions receive a single argument "parameters" that contains a blob with the body of the HTTP request. - Fission: - Protocol: Functions request are received in the runtimes as HTTP request what gives the opportunity to give a response directly. - Parameters: All the functions receive an object "context". This object has different properties depending on the runtime but as minimum it has a property "request" to read inputs and "response" to answer them. Regarding the available solutions and the current architecture of Kubeless we choose a similar solution to Fission/OpenWhisk/Lambda: Use the HTTP protocol to communicate runtimes and triggers and expose at least two parameters (explained below). We can tweak this parameter depending on the runtime language in order to give different functionalities (if needed). This is the simplest solution for our use case, the most flexible and it is easy to use. As specific proposal, the parameter should contain at least the information about the request. 
The properties of the request object will change depending on the runtime and the trigger source but in any case it should contain a parameter with the inputs of the request (the body of an HTTP POST or the message of a Kafka entry). ### Function input Following the above premises and the [CNCF suggestion](https://docs.google.com/document/d/1UjW8bt5O8QBgQRILJVKZJej_IuNnxl20AJu9wA8wcdI/edit#heading=h.3s49zyc) this is a possible implementation for the input object that functions will receive, following this schema (represented in JSON but the serialization may vary depending on the language): ```json { "event": { ["key": "value"], "source": "string", ["content-type": "string"], ["path": "string"], ["method": "string"], ["headers": "object"], ["topic": "string"], ... }, "context": { "function-name": "string", "runtime": "string", "namespace": "string", "memory-limit": "string", ["schedule": "string"], ["logger": "object"], ... } } ``` Note: Properties with brackets can be empty. Any required property can be added in the future maintaining backwards compatibility. - Event: Information about the request - "Key": Used to send data to the function, can be any "key" identifier. For example a message `{"message": "Hello world!"}` will be read in the function as `event.message`. That's the way Lambda and Openwhisk handle parameters. - Source: Event emitter information - Content-type: Explicit content type - Path: (HTTP request only) Path of the call - Method: (HTTP request only) HTTP method used (GET, POST, PUT…) - Headers: (HTTP request only) Request headers - Topic: (PubSub only) Topic of the request - Context: Information about the function - Function name: ID of the function - Runtime: Runtime ID and version - Namespace: Kubernetes namespace used - Memory limit: Pod memory limit - Schedule: Function schedule - Logger: (To be implemented) Once we have a way to store and retrieve logs, this interface should implement the basic methods to write/read them. 
For the moment we will continue working with stdout so we won’t include this property in the first version. ================================================ FILE: docs/proposals/http-triggers.md ================================================ # http trigger improvements Though there is no standard on what http/https triggers of FAAS platforms should support, most hosted FAAS solutions like AWS Lambda, google cloud functions, IBM cloud functions, Azure functions etc provide common functionality - http/https endpoint for the function: a fully qualified URL is automatically generated and assigned to an HTTP triggered Cloud Function which can be retrieved through respective cli or consoles - ability to customize the endpoint url by specifying route - a way to authenticate and authorize the function invoker through url - a way to restrict http method GET/POST/DELETE etc used to invoke function - stage and version functions Kubeless already supports `--trigger-http`. It does seem reasonable to expect similar functionality with kubeless for http triggers. This proposal would like to articulate current gaps and suggest changes. ## Challenges - In typical FAAS platforms you have [API gateway](https://martinfowler.com/articles/serverless.html) or router (for e.g AWS API gateway) which receives the requests and calls the relevant FaaS function. Sometimes they also perform authentication, input validation, response code mapping, etc. Control path (rest endpoint to create/update/delete functions) and data path (invoking function and getting response) are either combined into one entity or separated. Kubeless, as a native Kubernetes solution, intelligently leverages kubernetes constructs and offloads control path (Kubernetes API server through CRD) and data path (through services). While it helps in many aspects, it also means we are constrained by kubernetes constructs. For e.g authenticating the function caller. 
- Leveraging kubernetes service as data path to call the function means we need to deal with various service types of Kubernetes for various scenarios. For e.g, a function deployed with Kubeless, if its only caller is a microservice running in-cluster then perhaps service of `clusterIP` is needed. If you expect out of cluster callers but do not care about L7 then service of type NodePort is enough. For cases where you want L7, tls etc then Kubeless already leverages Kubernetes Ingress. Also there is a PR to support headless service which makes sense for baremetal deployment. While Kubeless should be flexible to allow different use-cases, it will be challenging to generate an http endpoint for the function. - On managed FAAS platforms, since the function user/developer is completely taken out of the infrastructure ops there is clear separation of concerns. With Kubeless leveraging K8S constructs, conscious effort must be put not to spill infrastructure or k8s concepts on to the function user. In other words be mindful of two personas using kubeless: function user and cluster/kubeless deployment operator. ## Gaps While it's debatable what's the desirable kubeless view of http-triggers, here are current gaps from the point of view of this proposal. * tight coupling of function routing with kubernetes ingress. So what if someone does not want ingress (i.e want to use node port, headless service etc) * `kubeless ingress`: function user explicitly dealing with ingress * though optional `--hostname` flag for `kubeless ingress`, why should function user be aware of ingress object hosts? * function user specifying the tls flags - kubeless client creates ingress objects - as function user how do i know the http/https endpoint for my function. alternatively how the controller can consistently generate the URL for the function. 
- as a cluster operator how do i tell which ingress controller to use, or how do i customize my ingress ## proposed changes - [formal specification](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) of the function spec that is devoid of any k8s/infrastructure constructs for writable fields - function users just do the route management, i.e. express the desired path for the function. Function spec to carry the function user intent, i.e. path to function mapping. - rename `kubeless ingress` to `kubeless route` - move the ingress object creation to kubeless controller. - kubeless controller that has ability to provision service (as cluster ip, node port, load balancer or headless) backing the function as desired by the cluster operator - introduce configmap for the controller that cluster operator can use to configure controller. for e.g details like which ingress controller to use - controller's ability to consistently generate an http endpoint for the function irrespective of the service type backing the function and whether kubernetes ingress is used or not - clean up the current kubeless flags related to ingress ## what do we achieve? 
- small step toward some of the common functionality of http triggers in other FAAS platforms - clean separation of concerns of function user and cluster/kubeless operator - extensibility of controller (where applicable) with configmap ## tracking issues - [#417](https://github.com/kubeless/kubeless/issues/417) kubeless list option should give info on http/https endpoint - [#476](https://github.com/kubeless/kubeless/issues/476) move ingress object creation to ingress controller - [#474](https://github.com/kubeless/kubeless/issues/474) support flexible service types for the service backing functions - [#475](https://github.com/kubeless/kubeless/issues/475) introduce configmap for kubeless-controller - [#478](https://github.com/kubeless/kubeless/issues/478) rename `ingress` command to `route` ================================================ FILE: docs/pubsub-functions.md ================================================ # PubSub events You can trigger any Kubeless function by a PubSub mechanism. The PubSub function is expected to consume input messages from a predefined topic from a messaging system. Kubeless currently supports using events from Kafka and NATS messaging systems. ## Kafka In Kafka [release page](https://github.com/kubeless/kafka-trigger/releases), you can find the manifest to quickly deploy a collection of Kafka and Zookeeper statefulsets. If you have a Kafka cluster already running in the same Kubernetes environment, you can also deploy PubSub function with it. Check out [this tutorial](/docs/use-existing-kafka) for more details on how to do that. 
If you want to deploy the manifest we provide to deploy Kafka and Zookeeper execute the following command: ```console $ export RELEASE=$(curl -s https://api.github.com/repos/kubeless/kafka-trigger/releases/latest | grep tag_name | cut -d '"' -f 4) $ kubectl create -f https://github.com/kubeless/kafka-trigger/releases/download/$RELEASE/kafka-zookeeper-$RELEASE.yaml ``` > NOTE: Kafka statefulset uses a PVC (persistent volume claim). Depending on the configuration of your cluster you may need to provision a PV (Persistent Volume) that matches the PVC or configure dynamic storage provisioning. Otherwise Kafka pod will fail to get scheduled. Also note that Kafka is only required for PubSub functions, you can still use http triggered functions. Please refer to [PV](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) documentation on how to provision storage for PVC. Once deployed, you can verify two statefulsets up and running: ``` $ kubectl -n kubeless get statefulset NAME DESIRED CURRENT AGE kafka 1 1 40s zoo 1 1 42s $ kubectl -n kubeless get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE broker ClusterIP None 9092/TCP 1m kafka ClusterIP 10.55.250.89 9092/TCP 1m zoo ClusterIP None 9092/TCP,3888/TCP 1m zookeeper ClusterIP 10.55.249.102 2181/TCP 1m ``` A function can be as simple as: ```python def foobar(event, context): print event['data'] return event['data'] ``` Now you can deploy a pubsub function. ```console $ kubeless function deploy test --runtime python2.7 \ --handler test.foobar \ --from-file test.py ``` You need to create a _Kafka_ trigger that lets you associate a function with a topic specified by `--trigger-topic` as below: ```console $ kubeless trigger kafka create test --function-selector created-by=kubeless,function=test --trigger-topic test-topic ``` After that you can invoke the function by publishing messages in that topic. To allow you to easily manage topics `kubeless` provides a convenience function `kubeless topic`. 
You can create/delete and publish to a topic easily. ```console $ kubeless topic create test-topic $ kubeless topic publish --topic test-topic --data "Hello World!" ``` You can check the result in the pod logs: ```console $ kubectl logs test-695251588-cxwmc ... Hello World! ``` ## NATS If you do not have a NATS cluster, it's pretty easy to set one up. Run below command to deploy a [NATS operator](https://github.com/nats-io/nats-operator) ```console $ kubectl apply -f https://github.com/nats-io/nats-operator/releases/latest/download/10-deployment.yaml ``` Once NATS operator is up and running run below command to deploy a NATS cluster ```console echo ' apiVersion: "nats.io/v1alpha2" kind: "NatsCluster" metadata: name: "nats" spec: size: 3 version: "1.1.0" ' | kubectl apply -f - -n nats-io ``` Above command will create NATS cluster IP service `nats.nats-io.svc.cluster.local:4222` which is the default URL Kubeless NATS trigger controller expects. Now use this manifest to deploy Kubeless NATS triggers controller. ```console $ export RELEASE=$(curl -s https://api.github.com/repos/kubeless/nats-trigger/releases/latest | grep tag_name | cut -d '"' -f 4) $ kubectl create -f https://github.com/kubeless/nats-trigger/releases/download/$RELEASE/nats-$RELEASE.yaml ``` By default NATS trigger controller expects the NATS cluster to be available as Kubernetes cluster service `nats.nats-io.svc.cluster.local:4222`. You can override the default NATS cluster url used by setting the environment variable `NATS_URL` in the manifest. Once NATS trigger controller is setup you can deploy the function and associate function with a topic on the NATS cluster. ```console $ kubeless function deploy pubsub-python-nats --runtime python2.7 \ --handler test.foobar \ --from-file test.py ``` After function is deployed you can use `kubeless trigger nats` CLI command to associate function with a topic on NATS cluster as below. 
```console $ kubeless trigger nats create pubsub-python-nats --function-selector created-by=kubeless,function=pubsub-python-nats --trigger-topic test ``` At this point you are all set try Kubeless NATS triggers. You could quickly test the functionality by publishing a message to the topic, and verifying that message is seen by the pod running the function. ```console $ kubeless trigger nats publish --url nats://nats-server-ip:4222 --topic test --message "Hello World!" ``` You can check the result in the pod logs: ```console $ kubectl logs pubsub-python-nats-5b9c849fc-tvq2l ... Hello World! ``` ## Other commands You can create, list and delete PubSub topics (for Kafka): ```console $ kubeless topic create another-topic Created topic "another-topic". $ kubeless topic delete another-topic $ kubeless topic ls ``` ================================================ FILE: docs/quick-start.md ================================================ # Installation Installation is made of three steps: * Download the `kubeless` CLI from the [release page](https://github.com/kubeless/kubeless/releases). * Create a `kubeless` namespace (used by default) * Then use one of the YAML manifests found in the release page to deploy kubeless. It will create a _functions_ Custom Resource Definition and launch a controller. There are several kubeless manifests being shipped for multiple k8s environments (non-rbac, rbac and openshift), pick the one that corresponds to your environment: * `kubeless-$RELEASE.yaml` is used for RBAC Kubernetes cluster. * `kubeless-non-rbac-$RELEASE.yaml` is used for non-RBAC Kubernetes cluster. * `kubeless-openshift-$RELEASE.yaml` is used to deploy Kubeless to OpenShift (1.5+). For example, this below is a show case of deploying kubeless to a Kubernetes cluster (with RBAC available). 
```console $ export RELEASE=$(curl -s https://api.github.com/repos/kubeless/kubeless/releases/latest | grep tag_name | cut -d '"' -f 4) $ kubectl create ns kubeless $ kubectl create -f https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml $ kubectl get pods -n kubeless NAME READY STATUS RESTARTS AGE kubeless-controller-manager-567dcb6c48-ssx8x 1/1 Running 0 1h $ kubectl get deployment -n kubeless NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE kubeless-controller-manager 1 1 1 1 1h $ kubectl get customresourcedefinition NAME AGE cronjobtriggers.kubeless.io 1h functions.kubeless.io 1h httptriggers.kubeless.io 1h ``` > Details on [installing kubeless in a different namespace](/docs/function-controller-configuration#install-kubeless-in-different-namespace) can be found here. To install the `kubeless` CLI, execute: #### Linux and macOS ```console export OS=$(uname -s| tr '[:upper:]' '[:lower:]') curl -OL https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless_$OS-amd64.zip && \ unzip kubeless_$OS-amd64.zip && \ sudo mv bundles/kubeless_$OS-amd64/kubeless /usr/local/bin/ ``` Binaries for x86 architectures can be found as well [in the releases page](https://github.com/kubeless/kubeless/releases). #### Windows 1. Download the latest release from [the releases page](https://github.com/kubeless/kubeless/releases). 2. Extract the content and add the `kubeless` binary to the system PATH. You are now ready to create functions. # Sample function You can use the CLI to create a function. Here is a toy: ```python def hello(event, context): print (event) return event['data'] ``` Functions in Kubeless have the same format regardless of the language of the function or the event source. In general, every function: - Receives an object `event` as its first parameter. This parameter includes all the information regarding the event source. In particular, the key 'data' should contain the body of the function request. 
- Receives a second object `context` with general information about the function. - Returns a string/object that will be used as response for the caller. You can find more details about the function interface [here](/docs/kubeless-functions#functions-interface) You create it with: ```console $ kubeless function deploy hello --runtime python3.8 \ --from-file test.py \ --handler test.hello INFO[0000] Deploying function... INFO[0000] Function hello submitted for deployment INFO[0000] Check the deployment status executing 'kubeless function ls hello' ``` Let's dissect the command: * `hello`: This is the name of the function we want to deploy. * `--runtime python3.8`: This is the runtime we want to use to run our function. Available runtimes can be found executing `kubeless get-server-config`. * `--from-file test.py`: This is the file containing the function code. Specifying a zip file or a gzip/bzip2/xz compressed tar file (see [list of supported suffixes](https://en.wikipedia.org/wiki/Tar_(computing)#Suffixes_for_compressed_files) for compressed tar files) is supported as long as it doesn't exceed the maximum size for an etcd entry (1 MB). * `--handler test.hello`: This specifies the file and the exposed function that will be used when receiving requests. In this example we are using the function `hello` from the file `test.py`. You can find the rest of options available when deploying a function executing `kubeless function deploy --help` You will see the function custom resource created: ```console $ kubectl get functions NAME AGE hello 1h $ kubeless function ls NAME NAMESPACE HANDLER RUNTIME DEPENDENCIES STATUS hello default helloget.foo python3.8 1/1 READY ``` You can then call the function with: ```console $ kubeless function call hello --data 'Hello world!' Hello world! 
``` Or you can curl directly with `kubectl proxy`using an [apiserver proxy URL](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#manually-constructing-apiserver-proxy-urls). For example: ```console $ kubectl proxy -p 8080 & $ curl -L --data '{"Another": "Echo"}' \ --header "Content-Type:application/json" \ localhost:8080/api/v1/namespaces/default/services/hello:http-function-port/proxy/ {"Another": "Echo"} ``` Kubeless also supports [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) which means you can provide your custom URL to the function. Please refer to [this doc](/docs/http-triggers) for more details. ## Clean up You can delete the function and uninstall Kubeless: ```console $ kubeless function delete hello $ kubeless function ls NAME NAMESPACE HANDLER RUNTIME DEPENDENCIES STATUS $ kubectl delete -f https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml ``` ## Examples See the [examples](https://github.com/kubeless/kubeless/tree/master/examples) directory for a list of simple examples in all the languages supported. NodeJS, Python, Golang etc ... Also checkout the [functions repository](https://github.com/kubeless/functions), where we're building a library of ready to use kubeless examples, including an [incubator](https://github.com/kubeless/functions/tree/master/incubator) to encourage contributions from the community - **your PR is welcome** ! :) ================================================ FILE: docs/release-flow.md ================================================ # Introduction Kubeless leverages [travis-ci](https://travis-ci.org/) to construct an automated release flow. A release package includes kubeless binaries for multiple platforms (linux and osx are supported) and one yaml file to deploy kubeless controller. 
# Checks before releasing Before releasing it is necessary to check that the rest of projects of the Kubeless environment do not present regressions for the new changes. Before creating a new release, deploy Kubeless using the latest commit of master (using the tag "latest" for the controller image). Make sure that the latest image build in Travis for the Kubeless controller is being used. After that, ensure that the following projects support the new version: - [Serverless Plugin](https://github.com/serverless/serverless-kubeless) - [Kubeless UI](https://github.com/kubeless/kubeless-ui) If any error is found after doing some manual testing, make sure the error is addressed before doing a release. # Kubeless release flow A release is triggered by [Travis Github Releases](https://docs.travis-ci.com/user/deployment/releases/) and based on GitHub tagging. Once a commit in the master branch is tagged, a travis job will be started to build and upload assets to Github release page under a new release with the tag name. The setup is described at `before_deploy` and `deploy` sections in `.travis.yaml`. `before_deploy` defines commands executed before releasing. At this stage, we prepare assets which will be uploaded including kubeless binaries and the yaml file. The yaml file is converted from [kubeless.jsonnet](https://github.com/kubeless/kubeless/blob/master/kubeless.jsonnet) file using [kubecfg](https://github.com/ksonnet/kubecfg). The kubeless-controller is built in format of docker image and push to [Bitnami repository](https://hub.docker.com/r/bitnami/kubeless-controller/) on DockerHub. Because we use sha256 digest for labeling docker images to be deployed when installing kubeless, we need to update these digests for the new release. `deploy` defines configuration for a github release. API key is encrypted version of our Github token with scope `public_repo`. 
The condition for a release to be triggered is defined at `on` section: - it will be triggered once a commit is tagged - the repository is `kubeless/kubeless` - only travis job for `os: linux` and `go: 1.8` can do the release Once the release job has finished a `Draft` with the release notes will appear in the [releases page](https://github.com/kubeless/kubeless/releases). Review the notes and include a summary of the changes included in the release. Delete information that is not useful for the users. Make sure that breaking changes are properly highlighted. After that click on "Publish" to make the new release available for anyone. # Update the rest of projects to use the new version _Note: These steps are suitable for being automated in the Travis release job_ Once the new version is available, there are several projects/files that require to be updated in order to point to the latest version: - Kubeless docs site: To point to the latest version in the docs of http://kubeless.io rebuild the last build on https://travis-ci.org/kubeless/kubeless-website. - Kubeless chart: Update the references for the different images or any other required change in the `chart` folder of this repository. - Serverless plugin: Update the `KUBELESS_VERSION` environment variable in the `.travis` file to point to the latest version. - [Optional] Brew recipes: An automated PR will be generated in the `homebrew-core` repository with the new version and commit ID. Unless the recipe contains breaking changes, the update will be handled by the homebrew team. If that is not the case, update the [recipe](https://github.com/Homebrew/homebrew-core/blob/master/Formula/kubeless.rb) manually. ================================================ FILE: docs/runtimes.md ================================================ # Kubeless Runtime Variants By default Kubeless has support for runtimes in different states: stable and incubator. 
You can find the different runtimes available in this repository: [https://github.com/kubeless/runtimes](https://github.com/kubeless/runtimes). You can also see the list of supported runtimes that your Kubeless installation can use executing: ```console $ kubeless get-server-config INFO[0000] Current Server Config: INFO[0000] Supported Runtimes are: python2.7, python3.4, python3.6, nodejs6, nodejs8, ruby2.3, ruby2.4, ruby2.5, php7.2, go1.10, dotnetcore2.0, java1.8, ballerina0.981.0 ``` Each runtime is encapsulated in a container image. The reference to these images are injected in the Kubeless configuration. ### NodeJS #### Example ```js module.exports = { foo: function (event, context) { console.log(event); return event.data; } } ``` #### Description NodeJS functions should export the desired method using `module.exports`. You can specify dependencies using a `package.json` file. It is also possible to return an object instead of a string, this object will be stringified before returning. When using the Node.js runtime, it is possible to configure a [custom registry or scope](https://docs.npmjs.com/misc/scope#associating-a-scope-with-a-registry) in case a function needs to install modules from a different source. For doing so it is necessary to set up the environment variables *NPM_REGISTRY* and *NPM_SCOPE* when deploying the function: ```console $ kubeless function deploy myFunction --runtime nodejs6 \ --env NPM_REGISTRY=http://my-registry.com \ --env NPM_SCOPE=@myorg \ --dependencies package.json \ --handler test.foo \ --from-file test.js ``` It's also possible to add another piece of configuration for your NPM file if the variable `NPM_CONFIG_EXTRA` is set. In case it's used, the build process will execute `npm config set $NPM_CONFIG_EXTRA` before installing dependencies. Depending on the size of the payload sent to the NodeJS function it is possible to find the error `413 PayloadTooLargeError`. 
It is possible to increase this limit setting the environment variable `REQ_MB_LIMIT`. This will define the maximum size in MB that the function will accept: ```console $ kubeless function deploy myFunction --runtime nodejs6 \ --env REQ_MB_LIMIT=50 \ --handler test.foo \ --from-file test.js ``` **For Webpack Users** Your webpacked functions will be `require()`-d in so your bundle should work out of the box. However, if your bundle size is approaching 1mb you should take advantage of Kubeless' ability to install dependencies for you instead of bundling them all into your payload. You will need to customize your webpack config to suit your own project, but below is an sample config of how to achieve this in Webpack 4.x: _webpack.config.js_ ```js const path = require("path"); const nodeExternals = require("webpack-node-externals"); const CopyWebpackPlugin = require("copy-webpack-plugin"); module.exports = { entry: { handlers: "./handlers.js" }, node: { __filename: true, __dirname: true }, target: "node", // do not include dependencies in the bundle externals: [nodeExternals()], devtool: "source-map", module: { rules: [ { test: /\.js$/, use: "babel-loader", // do not transpile the depedencies exclude: /node_modules/ } ] }, plugins: [ // do include the project's `package.json` in the bundle new CopyWebpackPlugin([ { from: path.join(__dirname, "path", "to", "your", "package.json"), to: "package.json" } ]) ] }; ``` Additionally, in your babel config, you can specify the transpile target to be the version of node you're using for your runtime. 
This is an example for Babel 7.x: ```js module.exports = { plugins: [ "@babel/plugin-proposal-class-properties", "@babel/plugin-proposal-object-rest-spread", "@babel/plugin-syntax-dynamic-import", "@babel/plugin-transform-runtime" ], // note the target node version here for nodejs8 presets: [["@babel/preset-env", { targets: { node: "8.10" } }]] }; ``` #### Server implementation For the Node.js runtime we start an [Express](http://expressjs.com) server and we include the routes for serving the health check and exposing the monitoring metrics. Apart from that we enable [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS) requests and [Morgan](https://github.com/expressjs/morgan) for handling the logging in the server. Monitoring is supported if the function is synchronous or if it uses promises. #### Distroless Variant There is the [distroless](https://github.com/GoogleContainerTools/distroless) variant of the Node.js 8 runtime. The distroless Node.js runtime contains only the kubeless function and its runtime dependencies. In particular, this variant does not contain package manager, shells or any other programs which are part of a standard Linux distribution. The same example Node.js function from above can then be deployed: ```console $ kubeless function deploy myFunction --runtime nodejs_distroless8 \ --env NPM_REGISTRY=http://my-registry.com \ --env NPM_SCOPE=@myorg \ --dependencies package.json \ --handler test.foo \ --from-file test.js ``` #### CloudEvents 0.1 Variant [CloudEvents](https://cloudevents.io) is a CNCF effort to standardize the way events are represented in the Cloud. There is a variant of the Node.js 8 runtime that is ready to receive events that follow that specification (v0.1). 
This variant expects the header `application/cloudevents+json` in order to be parsed as a JSON cloud event or the different headers that are defined in the [specification](https://github.com/cloudevents/spec/blob/master/spec.md) adapting them to the Kubeless function format. The same example Node.js function from above can then be deployed: ```console $ kubeless function deploy myFunction --runtime nodejsCE8 \ --dependencies package.json \ --handler test.foo \ --from-file test.js ``` ### Python #### Example ```py def handler(event, context): print (event) return event['data'] ``` #### Description Python functions should define the desired method. You can specify dependencies using a `requirements.txt` file. #### Server implementation For python we use [Bottle](https://bottlepy.org) and we also add routes for health check and monitoring metrics. ### Ruby #### Example ```rb def handler(event, context) puts event JSON.generate(event[:data]) end ``` #### Description Ruby functions should define the desired method. You can specify dependencies using a `Gemfile` file. #### Server implementation For the case of Ruby we use [Sinatra](http://www.sinatrarb.com) as web framework and we add the routes required for the function and the health check. Monitoring is currently not supported yet for this framework. PR is welcome :-) ### Go #### Example ```go package kubeless import "github.com/kubeless/kubeless/pkg/functions" func Handler(event functions.Event, context functions.Context) (string, error) { return event.Data, nil } ``` #### Description Go functions require to import the package `github.com/kubeless/kubeless/pkg/functions` that is used to define the input parameters. The desired method should be exported in the package. You can specify dependencies using [go modules](https://blog.golang.org/using-go-modules). #### Go with Dependency Example This is an example of a function using the `github.com/sirupsen/logrus` dependency. 
```go // hellowithdeps.go package kubeless import ( "github.com/kubeless/kubeless/pkg/functions" "github.com/sirupsen/logrus" ) // Hello sample function with dependencies func Hello(event functions.Event, context functions.Context) (string, error) { logrus.Info(event.Data) return "Hello world!", nil } ``` ```go //go.mod module function go 1.14 require ( github.com/sirupsen/logrus v1.6.0 ) ``` ```bash kubeless function deploy get-go-deps --runtime go1.14 --handler hellowithdeps.Hello --from-file hellowithdeps.go --dependencies go.mod ``` #### Server implementation The Go HTTP server doesn't include any framework since the native packages includes enough functionality to fit our needs. Since there is not a standard package that manages server logs that functionality is implemented in the same server. It is also required to implement the `ResponseWriter` interface in order to retrieve the Status Code of the response. Apart from that we enable [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS) to accept any request. #### Debugging compilation If there is an error during the compilation of a function, the error message will be dumped to the termination log. If you see that the pod is crashed in a init container: ```console NAME READY STATUS RESTARTS AGE get-go-6774465f95-x55lw 0/1 Init:CrashLoopBackOff 1 1m ``` That can mean that the compilation failed. You can obtain the compilation logs executing: ```console $ kubectl get pod -l function=get-go -o yaml ... - containerID: docker://253fb677da4c3106780d8be225eeb5abf934a961af0d64168afe98159e0338c0 image: andresmgot/go-init:1.10 lastState: terminated: containerID: docker://253fb677da4c3106780d8be225eeb5abf934a961af0d64168afe98159e0338c0 exitCode: 2 finishedAt: 2018-04-06T09:01:16Z message: | # kubeless /go/src/kubeless/handler.go:6:1: syntax error: non-declaration statement outside function body ... ``` You can see there that there is a syntax error in the line 6 of the function. 
You can also retrieve the same information with this one-liner: ```console $ kubectl get pod -l function=get-go -o go-template="{{range .items}}{{range .status.initContainerStatuses}}{{.lastState.terminated.message}}{{end}}{{end}}" # kubeless /go/src/kubeless/handler.go:6:1: syntax error: non-declaration statement outside function body ``` #### Timeout handling One peculiarity of the Go runtime is that the user has a `Context` object as part of the `Event.Extensions` parameter. This can be used to handle timeouts in the function. For example: ```go func Foo(event functions.Event, context functions.Context) (string, error) { select { case <-event.Extensions.Context.Done(): return "", nil case <-time.After(5 * time.Second): } return "Function returned after 5 seconds", nil } ``` If the function above has a timeout smaller than 5 seconds it will exit and the code after the `select{}` won't be executed. ### Java #### Example ```java package io.kubeless; import io.kubeless.Event; import io.kubeless.Context; public class Foo { public String foo(io.kubeless.Event event, io.kubeless.Context context) { return "Hello world!"; } } ``` #### Description Java functions must use `io.kubeless` as package and should import both `io.kubeless.Event` and `io.kubeless.Context` packages. Function should be made part of a public class and should have a function signature that takes `Event` and `Context` as inputs and produces `String` output. Once you have Java function meeting the requirements it can be deployed with Kubeless as below. Where handler part `--handler Foo.foo` takes `Classname.Methodname` format. ```cmd kubeless function deploy get-java --runtime java1.8 --handler Foo.foo --from-file Foo.java ``` Kubeless supports Java functions with dependencies. Kubeless uses Maven for both dependency management and building user given functions. Users are expected to provide function dependencies expresses in Maven pom.xml format. 
Lets take Java function with dependency on `org.joda.time.LocalTime`. ```java package io.kubeless; import io.kubeless.Event; import io.kubeless.Context; import org.joda.time.LocalTime; public class Hello { public String sayHello(io.kubeless.Event event, io.kubeless.Context context) { System.out.println(event.Data); LocalTime currentTime = new LocalTime(); return "Hello world! Current local time is: " + currentTime; } } ``` #### Dependencies Dependencies are expressed through standard Maven pom.xml file format as below. ```xml 4.0.0 function function 1.0-SNAPSHOT joda-time joda-time 2.9.2 io.kubeless params 1.0-SNAPSHOT io.kubeless kubeless 1.0-SNAPSHOT ``` Notice the reference to `kubeless` parent pom module and dependency on `params` artifact. pom.xml should also use `function` as artifact ID. Once you have Java function with dependencies and pom.xml file expressing the dependencies Java function can be deployed with Kubeless as below. ```cmd kubeless function deploy get-java-deps --runtime java1.8 --handler Hello.sayHello --from-file java/HelloWithDeps.java --dependencies java/pom.xml ``` > Note: Maven command line arguments can be set using environment flag `--env`. For instance proxy details can be set as ```cmd kubeless function deploy get-java --runtime java1.8 --handler Foo.foo --from-file Foo.java --env MAVEN_OPTS='-DproxySet=true -DproxyHost= -DproxyPort=' ``` ### .NET Core (C#) #### Example ```csharp using System; using Kubeless.Functions; public class module { public object handler(Event k8Event, Context k8Context) { return k8Event.Data; } } ``` Deploy it using the following command: ```bash kubeless function deploy helloget --from-file helloget.cs --handler module.handler --runtime dotnetcore2.0 ``` #### Description To get started using .NET Core with kubeless, you should use the following commands: ```bash dotnet new library dotnet add package Kubeless.Functions ``` .NET Core (C#) functions supports returns for any primitive or complex type. 
The method signature needs to have first a `Kubeless.Functions.Event` followed by a `Kubeless.Functions.Context`. The models are defined as follows: ```csharp public class Context { public string ModuleName { get; } public string FunctionName { get; } public string FunctionPort { get; } public string Timeout { get; } public string Runtime { get; } public string MemoryLimit { get; } } ``` ```csharp public class Event { public object Data { get; } public string EventId { get; } public string EventType { get; } public string EventTime { get; } public string EventNamespace { get; } public Extensions Extensions { get; } } ``` #### Dependencies Dependencies are handled in the `.csproj` file. You can use the regular `.csproj` file outputted by the `dotnet new library` command. ```xml netstandard2.0 ``` The runtime already has the package `Kubeless.Functions:0.1.1` built in, which is necessary for all functions - so you don't need to include that. Then, if you have a function which does not need any external references other than `Kubeless.Functions`, you don't even need to send the `--dependencies` flag on the kubeless cli. 
You can deploy them using the command: ```bash kubeless function deploy fibonacci --from-file fibonacci.cs --handler module.handler --dependencies fibonacci.csproj --runtime dotnetcore2.0 ``` ##### `nuget.config` If you happen to be using custom nuget repositories through a `nuget.config` file, you'll need to include the file along with the code inside a `.zip` file and then you can deploy the function with the `nuget.config` using the command: ```bash kubeless function deploy custom-deps --from-file custom-deps.zip --handler module.handler --dependencies custom-deps.csproj --runtime dotnetcore2.0 ``` ### Ballerina #### Example ```ballerina import kubeless/kubeless; import ballerina/io; public function foo(kubeless:Event event, kubeless:Context context) returns (string|error) { io:println(event); io:println(context); return "Hello Ballerina"; } ``` #### Description The Ballerina functions should import the package `kubeless/kubeless`. This [package](https://central.ballerina.io/kubeless/kubeless) contains two types `Event` and `Context`. ```console $ kubeless function deploy foo --runtime ballerina0.981.0 --from-file foo.bal --handler foo.foo ``` When using the Ballerina runtime, it is possible to provide a configuration via `kubeless.toml` file. The values in kubeless.toml file are available for the function. The function(.bal file) and conf file should be in the same directory. The zip file containing both files should be passed to the Kubeless CLI. ```console foo ├── hellowithconf.bal └── kubeless.toml $ zip -r -j foo.zip foo/ $ kubeless function deploy foo --runtime ballerina0.981.0 --from-file foo.zip --handler hellowithconf.foo ``` #### Server implementation For the Ballerina runtime we start a [Ballerina HTTP server](../docker/runtime/ballerina/kubeless_run.tpl.bal) with two resources, '/' and '/healthz'. ## Use a custom runtime The Kubeless configuration defines a set of default container images per supported runtime variant. 
These default container images can be configured via Kubernetes environment variables on the Kubeless controller's deployment container. Or modifying the `kubeless-config` ConfigMap that is deployed along with the Kubeless controller. For more information about how to modify the Kubeless configuration check [this guide](https://kubeless.io/docs/function-controller-configuration/). Apart than changing the configuration, it is possible to use a custom runtime specifying the image that the function will use. If you are interested in developing a new runtime from scratch (i.e. for a new language) you should follow [this guide](https://kubeless.io/docs/implementing-new-runtime/). In the linked guide you can find the requirements that a new runtime should fulfill and how you can submit new runtimes to the Kubeless project. In any case, if you want to use one of the existing runtimes but you want to modify it to support a specific feature you can easily do that. The first thing is to modify the files in [`docker/runtime`](https://github.com/kubeless/kubeless/tree/master/docker/runtime) folder. For example, if we want to add the `lodash` `npm` module globally in the NodeJS runtime we can modify its [Dockerfile](https://github.com/kubeless/kubeless/tree/master/docker/runtime/nodejs/Dockerfile.8): ```patch ... RUN apt-get update && apt-get install git + RUN npm install -g lodash ... ``` Now we can use the Makefile in the folder to generate the base image: ```console ▶ make build8 docker build -t kubeless/nodejs:8$RUNTIME_TAG_MODIFIER -f Dockerfile.8 . Sending build context to Docker daemon 7.059MB Step 1/10 : FROM node:8 ---> 55791187f71c Step 2/10 : RUN apt-get update && apt-get install git ---> Using cache ---> 70f1565e9353 Step 3/10 : RUN npm install -g lodash ---> Running in 03602280a37d + lodash@4.17.10 added 1 package in 1.369s ... 
Successfully built d68eccb2568b Successfully tagged kubeless/nodejs:8 ``` We can now retag the image and push it using a different account: ```console ▶ docker tag kubeless/nodejs:8 andresmgot/nodejs-with-lodash:8 ▶ docker push andresmgot/nodejs-with-lodash:8 The push refers to repository [docker.io/andresmgot/nodejs-with-lodash] 5a9aabfdd819: Pushed ... 8: digest: sha256:dfd26034130e5aae5a3db7b3df969649c44c3f7d1168bee7c4e1e6e7e75726d7 size: 3261 ``` Finally in order to use this new flavor we need to add it to the Kubeless config. We will just copy the official `nodejs` runtime and rename it to reflect the changes: ```console ▶ kubectl edit -n kubeless configmap kubeless-config # Add the following object within the "runtime-images" array # { # "ID": "nodejsWithLodash", # "compiled": false, # "versions": [ # { # "name": "node8", # "version": "8", # "runtimeImage": "andresmgot/nodejs-with-lodash:8", # "initImage": "node:8" # } # ], # "depName": "package.json", # "fileNameSuffix": ".js" # }, configmap "kubeless-config" edited ``` > NOTE: You should just use lowercase and uppercase characters for the ID. The runtime selection is made concatenating the runtime ID and the version (i.e. nodejsWithLodash8 for this example) The last step in order to deploy a function with the new runtime is to restart the Kubeless controller pod: ```console ▶ kubectl delete pods -n kubeless -l kubeless=controller pod "kubeless-controller-manager-67fbc78f6d-w2vnk" deleted ▶ kubeless function deploy my-nodejs-func --runtime nodejsWithLodash8 --handler helloget.foo --from-file examples/nodejs/helloget.js INFO[0000] Deploying function... 
INFO[0000] Function my-nodejs-func submitted for deployment INFO[0000] Check the deployment status executing 'kubeless function ls my-nodejs-func' # Wait for the function pod to be deployed ▶ kubectl exec -it my-nodejs-func-55546fcf68-78fpz -- npm list -g | grep lodash +-- lodash@4.17.10 ``` ## Use a custom livenessProbe One can use kubeless-config to override the default liveness probe. By default, the liveness probe is `http-get`; this can be overridden by providing the liveness probe info in `kubeless-config` under `runtime-images`. It has been implemented in such a way that each runtime can have its own liveness probe info. To use a custom liveness probe, paste the following info in `runtime-images`: ```json "version": [], "livenessProbeInfo": { "exec": { "command": [ "curl", "-f", "http://localhost:8080/healthz" ] }, "initialDelaySeconds": 5, "periodSeconds": 5, "failureThreshold": 3, "timeoutSeconds": 30 }, "depname": "" ``` ================================================ FILE: docs/streaming-functions.md ================================================ # Data Stream events Kubeless lets you trigger any Kubeless function in response to records ingested into a data stream. Kubeless currently supports the AWS Kinesis streaming service. ## AWS Kinesis To trigger Kubeless functions in response to records ingested into an AWS Kinesis stream you need to deploy the Kubeless AWS Kinesis trigger controller. Please use this manifest to deploy the Kubeless AWS Kinesis trigger controller. ```console export RELEASE=$(curl -s https://api.github.com/repos/kubeless/kinesis-trigger/releases/latest | grep tag_name | cut -d '"' -f 4) kubectl create -f https://github.com/kubeless/kinesis-trigger/releases/download/$RELEASE/kinesis-$RELEASE.yaml ``` Once you deploy the manifest you will see the Kinesis trigger controller running in the Kubeless namespace as below. 
```console $ kubectl get pods -n kubeless NAME READY STATUS RESTARTS AGE kinesis-trigger-controller-65c78f9f44-v5flq 1/1 Running 0 1h kubeless-controller-manager-6b7cdcdc76-x6gsd 1/1 Running 0 13h ``` You shall also notice a CRD resource type `kinesistriggers.kubeless.io` created as below. ```console $ kubectl get crd NAME AGE cronjobtriggers.kubeless.io 13h functions.kubeless.io 13h httptriggers.kubeless.io 13h kinesistriggers.kubeless.io 13h ``` The Kubeless cli lets you create Kubeless triggers of the Kinesis type. The Kubeless cli provides the necessary functionality to manage the life cycle of Kinesis triggers. ```console $ kubeless trigger kinesis --help kinesis trigger command allows users to create, list, update, delete Kinesis triggers running on Kubeless Usage: kubeless trigger kinesis SUBCOMMAND [flags] kubeless trigger kinesis [command] Available Commands: create Create a Kinesis trigger create-stream Create a Kinesis stream delete Delete a Kinesis trigger list list all Kinesis triggers deployed to Kubeless publish publish message to a Kinesis stream update Update a Kinesis trigger Flags: -h, --help help for kinesis Use "kubeless trigger kinesis [command] --help" for more information about a command. ``` In order to deploy a Kinesis trigger and associate a Kubeless function to be invoked in response to records ingested into a Kinesis data stream, you need to first let Kubeless know the credentials required to access your AWS Kinesis stream. Kubeless will leverage Kubernetes secrets to store the credentials in the cluster and use them to access the Kinesis stream. First you need to create a Kubernetes secret that can store your AWS `aws_access_key_id` and `aws_secret_access_key`. Usually, if you are using the AWS cli, your keys will be present in `~/.aws/credentials`, or you can create AWS access keys from the AWS console. 
```console kubectl create secret generic ec2 --from-literal=aws_access_key_id=$AWS_ACCESS_KEY_ID --from-literal=aws_secret_access_key=$AWS_SECRET_ACCESS_KEY ``` Once you have created a secret you are ready to deploy a Kubeless Kinesis trigger as below. ```console kubeless trigger kinesis create test-trigger --function-name post-python --aws-region us-west-2 --shard-id shardId-000000000000 --stream my-kinesis-stream --secret ec2 ``` Let's look at the expected flags. `--aws-region` is the AWS region in which your Kinesis stream is available. `--shard-id` is the id of the shard into which records are placed. You should be able to get the `shard-id` from the stream description. `--stream` is the name of the Kinesis stream. ```console $ aws kinesis describe-stream --stream-name my-kinesis-stream { "StreamDescription": { "RetentionPeriodHours": 24, "StreamName": "my-kinesis-stream", "Shards": [ { "ShardId": "shardId-000000000000", "HashKeyRange": { "EndingHashKey": "340282366920938463463374607431768211455", "StartingHashKey": "0" }, "SequenceNumberRange": { "StartingSequenceNumber": "49584495912138607235774073050889122383423872293029281794" } } ], "StreamARN": "arn:aws:kinesis:us-west-2:159706291352:stream/my-kinesis-stream", "EnhancedMonitoring": [ { "ShardLevelMetrics": [] } ], "StreamStatus": "ACTIVE" } } ``` Once you deploy the Kinesis trigger you will see a `kinesistrigger` CRD object as below. ```console $ kubectl get kinesistriggers.kubeless.io test -o yaml apiVersion: kubeless.io/v1beta1 kind: KinesisTrigger metadata: labels: created-by: kubeless name: test namespace: default spec: aws-region: us-west-2 function-name: post-python secret: ec2 shard: shardId-000000000000 stream: my-kinesis-stream ``` At this point you will be able to publish a record into the stream either through the Kubeless CLI or using the AWS cli as below. 
```console kubeless trigger kinesis publish --aws-region us-west-2 --secret ec2 --partition-key "123" --stream my-kinesis-stream --message "hello world" ``` or ```console aws kinesis put-record --stream-name my-kinesis-stream --partition-key 123 --data testdata1 aws kinesis put-record --stream-name my-kinesis-stream --partition-key 123 --data testdata2 aws kinesis put-record --stream-name my-kinesis-stream --partition-key 123 --data testdata3 ``` You shall see the log of received messages in the function pod associated with the Kinesis trigger. ```console $ kubectl logs post-python-59f7fc4b54-4nhbb Bottle v0.12.13 server starting up (using CherryPyServer())... Listening on http://0.0.0.0:8080/ Hit Ctrl-C to quit. {'event-time': '2018-05-18 05:40:42.881137473 +0000 UTC', 'extensions': {'request': }, 'event-type': 'application/x-www-form-urlencoded', 'event-namespace': 'kinesistriggers.kubeless.io', 'data': 'testdata12', 'event-id': 'bDRMSN3NPC81ktU'} 172.17.0.7 - - [18/May/2018:05:40:42 +0000] "POST / HTTP/1.1" 200 10 "" "Go-http-client/1.1" 0/11758 {'event-time': '2018-05-18 05:40:44.891994208 +0000 UTC', 'extensions': {'request': }, 'event-type': 'application/x-www-form-urlencoded', 'event-namespace': 'kinesistriggers.kubeless.io', 'data': 'testdata22', 'event-id': 'uHdiWN-lzeKYQyQ'} 172.17.0.7 - - [18/May/2018:05:40:44 +0000] "POST / HTTP/1.1" 200 10 "" "Go-http-client/1.1" 0/8983 {'event-time': '2018-05-18 05:40:45.878361324 +0000 UTC', 'extensions': {'request': }, 'event-type': 'application/x-www-form-urlencoded', 'event-namespace': 'kinesistriggers.kubeless.io', 'data': 'testdata32', 'event-id': 'sRRjSasGVApy8tA'} ``` ================================================ FILE: docs/triggers.md ================================================ # Triggers To invoke deployed functions, you need to create **triggers**. A function can have multiple triggers, but each of those will only reference a single deployed function. 
Each trigger has its own schema and usage, so we've created a separate page for each one of those. ## Available triggers In this section, we're going to list our triggers. Since Kubeless is an open-source tool there are multiple triggers that we haven't listed here. Feel free to add your trigger to this list. * [HTTP Trigger](/docs/http-triggers) * [CronJob Trigger](/docs/cronjob-triggers) * [PubSub Triggers](/docs/pubsub-functions) * [Kafka Trigger](/docs/pubsub-functions#kafka) * [NATS Trigger](/docs/pubsub-functions#nats) * [Data Stream Triggers](/docs/streaming-functions) * [AWS Kinesis Trigger](/docs/streaming-functions/#aws-kinesis) ## Creating a new trigger It is really simple to create a new trigger on Kubeless. Take a look at the [Implementing a New Trigger](/docs/implementing-new-trigger) page to learn more about it. ================================================ FILE: docs/troubleshooting.md ================================================ # Troubleshooting ## Installation If installing using ```console kubectl create -f kubeless.yaml --namespace kubeless ``` gives the following error: ```console customresourcedefinition "functions.k8s.io" created error: error validating "kubeless.yaml": error validating data: unknown object type schema.GroupVersionKind{Group:"", Version:"v1", Kind:"Service"}; if you choose to ignore these errors, turn validation off with --validate=false ``` You probably have an older version of Kubernetes. Make sure you are using at least version `1.7`. ## Kafka and Zookeeper Persistent Volume creation Since Kubeless 0.5, there is a standalone manifest for deploying Kafka and Zookeeper. In some platforms, the Persistent Volumes that these applications require are not automatically generated. 
If that is your case, you will see the deployments and Persistent Volume Claims as Pending: ``` $ kubectl get pods -n kubeless NAME READY STATUS RESTARTS AGE kafka-0 1/1 Pending 0 1h kafka-trigger-controller-7f4f458f8b-l6f5m 1/1 Running 0 1h kubeless-controller-manager-58d78fff74-g7fsd 1/1 Running 0 1h zoo-0 1/1 Pending 0 1h $ kubectl get pvc -n kubeless NAME STATUS VOLUME CAPACITY ACCESSMODES STORAGECLASS AGE datadir-kafka-0 Pending 2m zookeeper-zoo-0 Pending 2m ``` If you are running Kubernetes in GKE check the specific guide [here](/docs/GKE-deployment) to create the required disks and PVs. Otherwise, check your provider's documentation on how to create these required volumes. Note that `kafka` and `zookeeper` are only needed when working with Kafka events; you can still use Kubeless to trigger functions using HTTP requests. ================================================ FILE: docs/use-existing-kafka.md ================================================ # Use an existing Kafka cluster with Kubeless In the Kubeless [release page](https://github.com/kubeless/kubeless/releases), we provide along with the Kubeless manifests a collection of Kafka and Zookeeper statefulsets which help users to quickly deploy PubSub functions. These statefulsets are deployed in the `kubeless` namespace. However, if you have a Kafka cluster already running in the same Kubernetes cluster, this doc will walk you through how to deploy a Kubeless PubSub function with it. Let's assume that you have a Kafka cluster running in the `pubsub` namespace like below: ```console $ kubectl -n pubsub get po NAME READY STATUS RESTARTS AGE kafka-0 1/1 Running 0 7h zoo-0 1/1 Running 0 7h $ kubectl -n pubsub get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kafka ClusterIP 10.55.253.151 9092/TCP 7h zookeeper ClusterIP 10.55.248.146 2181/TCP 7h ``` **Note**: If you want to use the command `kubeless topic` you need to add a label to your Kafka deployment (`kubeless=kafka`) in order for the CLI to find it. 
And Kubeless already running at `kubeless` namespace: ```console $ kubectl -n kubeless get po NAME READY STATUS RESTARTS AGE kubeless-controller-manager-58676964bb-l79gh 1/1 Running 0 5d ``` Now we need to deploy the Kafka consumer and the Kafka Trigger CRD. We can do that extracting the Deployment, CRD and ClusterRoles from the generic Kafka manifest. The key part is adding the environment variable `KAFKA_BROKERS` pointing to the right URL: ```yaml $ echo ' --- apiVersion: apps/v1beta1 kind: Deployment metadata: labels: kubeless: kafka-trigger-controller name: kafka-trigger-controller namespace: kubeless spec: selector: matchLabels: kubeless: kafka-trigger-controller template: metadata: labels: kubeless: kafka-trigger-controller spec: containers: - image: bitnami/kafka-trigger-controller:latest imagePullPolicy: IfNotPresent name: kafka-trigger-controller env: - name: KAFKA_BROKERS value: kafka.pubsub:9092 # CHANGE THIS! serviceAccountName: controller-acct --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: kafkatriggers.kubeless.io spec: group: kubeless.io names: kind: KafkaTrigger plural: kafkatriggers singular: kafkatrigger scope: Namespaced version: v1beta1 --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: kafka-controller-deployer roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kafka-controller-deployer subjects: - kind: ServiceAccount name: controller-acct namespace: kubeless --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: kafka-controller-deployer rules: - apiGroups: - "" resources: - services - configmaps verbs: - get - list - apiGroups: - kubeless.io resources: - functions - kafkatriggers verbs: - get - list - watch - update - delete ' | kubectl create -f - deployment "kafka-trigger-controller" created clusterrolebinding "kafka-controller-deployer" created clusterrole "kafka-controller-deployer" created 
customresourcedefinition "kafkatriggers.kubeless.io" created ``` Now we need to create the `s3-python` topic and try to publish some messages. You can do it with your own Kafka client. In this example, I will try to use the bundled binaries in the kafka container: ```console # create s3-python topic $ kubectl -n pubsub exec -it kafka-0 -- /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper.pubsub:2181 --replication-factor 1 --partitions 1 --topic s3-python # send test message to s3-python topic $ kubectl -n pubsub exec -it kafka-0 -- /opt/bitnami/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic s3-python > hello world ``` Open another terminal and check the pubsub function log to see if it receives the message: ```console $ kubectl logs -f pubsub-python-5445bdcb64-48bv2 hello world ``` When using SASL you must add the `KAFKA_ENABLE_SASL`, `KAFKA_USERNAME` and `KAFKA_PASSWORD` env vars to set up authentication (you might use a secret): ```yaml $ echo ' --- apiVersion: apps/v1beta1 kind: Deployment metadata: labels: kubeless: kafka-trigger-controller name: kafka-trigger-controller namespace: kubeless spec: selector: matchLabels: kubeless: kafka-trigger-controller template: metadata: labels: kubeless: kafka-trigger-controller spec: containers: - image: bitnami/kafka-trigger-controller:latest imagePullPolicy: IfNotPresent name: kafka-trigger-controller env: ... - name: KAFKA_ENABLE_SASL value: true # CHANGE THIS! - name: KAFKA_USERNAME value: kafka-sasl-username # CHANGE THIS! - name: KAFKA_PASSWORD value: kafka-sasl-password # CHANGE THIS! ... ``` When using SSL to secure Kafka communication, you must set `KAFKA_ENABLE_TLS`, and specify some of these: * `KAFKA_CACERTS` to check the server certificate * `KAFKA_CERT` and `KAFKA_KEY` to check the client certificate * `KAFKA_INSECURE` to skip TLS verification Example for Kafka controller deployments using TLS `Prerequisite` : Create secrets to hold certificates and keys. 
```yaml --- apiVersion: apps/v1beta1 kind: Deployment metadata: labels: kubeless: kafka-trigger-controller name: kafka-trigger-controller namespace: kubeless spec: selector: matchLabels: kubeless: kafka-trigger-controller template: metadata: labels: kubeless: kafka-trigger-controller spec: volumes: - name: kafka-volume secret: secretName: certs-and-keys-secret # REPLACE WITH SECRET HOLDING CERTS AND KEYS containers: - image: bitnami/kafka-trigger-controller:latest imagePullPolicy: IfNotPresent name: kafka-trigger-controller volumeMounts: - name: kafka-volume mountPath: /path/to/certsandkeys env: ... - name: KAFKA_ENABLE_TLS value: "true" # ENABLE TLS - name: KAFKA_CACERTS value: "/path/to/certsandkeys/ca.crt" # CHANGE THIS! (NOTE : PATH HERE MATCHING THE MOUNT PATH ABOVE) - name: KAFKA_CERT value: "/path/to/certsandkeys/cert.pem" # CHANGE THIS! (NOTE : PATH HERE MATCHING THE MOUNT PATH ABOVE) - name: KAFKA_KEY value: "/path/to/certsandkeys/key.pem" # CHANGE THIS! (NOTE : PATH HERE MATCHING THE MOUNT PATH ABOVE) ... ``` ================================================ FILE: examples/Makefile ================================================ GIT_SHA1 ?= master BASE_URL := https://raw.githubusercontent.com/kubeless/kubeless/$(GIT_SHA1) get-python: kubeless function deploy get-python --runtime python3.7 --handler helloget.foo --from-file python/helloget.py get-python-verify: kubeless function call get-python |egrep hello.world kubeless function top --function get-python --out yaml |egrep total_calls.*[1-100000] get-python-update: $(eval TMPDIR := $(shell mktemp -d)) printf 'def foo(event, context):\n%4sreturn "hello world updated"\n' > $(TMPDIR)/hello-updated.py kubeless function update get-python --from-file $(TMPDIR)/hello-updated.py rm -rf $(TMPDIR) get-python-update-verify: kubeless function call get-python |egrep hello.world.updated get-python-deps: cd python && zip hellowithdeps.zip hellowithdeps.py hellowithdepshelper.py && cd .. 
kubeless function deploy get-python-deps --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.zip --dependencies python/requirements.txt get-python-deps-tar-gz: cd python && tar czf hellowithdeps.tar.gz hellowithdeps.py hellowithdepshelper.py && cd .. kubeless function deploy get-python-deps-tar-gz --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tar.gz --dependencies python/requirements.txt get-python-deps-tar-bz2: cd python && tar cjf hellowithdeps.tar.bz2 hellowithdeps.py hellowithdepshelper.py && cd .. kubeless function deploy get-python-deps-tar-bz2 --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tar.bz2 --dependencies python/requirements.txt get-python-deps-tar-xz: cd python && tar cJf hellowithdeps.tar.xz hellowithdeps.py hellowithdepshelper.py && cd .. kubeless function deploy get-python-deps-tar-xz --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tar.xz --dependencies python/requirements.txt get-python-deps-verify: kubeless function call get-python-deps |egrep Google get-python-deps-tar-gz-verify: kubeless function call get-python-deps-tar-gz |egrep Google get-python-deps-tar-bz2-verify: kubeless function call get-python-deps-tar-bz2 |egrep Google get-python-deps-tar-xz-verify: kubeless function call get-python-deps-tar-xz |egrep Google get-python-custom-port: kubeless function deploy get-python-custom-port --runtime python3.7 --handler helloget.foo --from-file python/helloget.py --port 8081 get-python-custom-port-verify: kubectl get svc get-python-custom-port -o yaml | grep 'targetPort: 8081' kubeless function call get-python-custom-port |egrep hello.world get-python-deps-update: $(eval TMPDIR := $(shell mktemp -d)) printf 'bs4\ntwitter\n' > $(TMPDIR)/requirements.txt kubeless function update get-python-deps --dependencies $(TMPDIR)/requirements.txt rm -rf $(TMPDIR) get-python-deps-update-verify: pod=`kubectl get pod -l 
function=get-python-deps -o go-template -o custom-columns=:metadata.name --no-headers=true`; \ echo "Checking updated deps of $$pod"; \ kubectl exec -it $$pod pip freeze | grep -q "twitter==" get-python-url-deps: cd python && tar czf hellowithdeps.tgz hellowithdeps.py hellowithdepshelper.py && cd .. kubeless function deploy get-python-url-deps --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tgz --dependencies $(BASE_URL)/examples/python/requirements.txt get-python-url-deps-verify: kubeless function call get-python-url-deps |egrep Google get-node-url-zip: kubeless function deploy get-node-url-zip --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.zip get-node-url-tar-gz: kubeless function deploy get-node-url-tar-gz --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.tar.gz get-node-url-tar-bz2: kubeless function deploy get-node-url-tar-bz2 --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.tar.bz2 get-node-url-tar-xz: kubeless function deploy get-node-url-tar-xz --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.tar.xz get-node-url-zip-verify: kubeless function call get-node-url-zip |egrep hello.world get-node-url-tar-gz-verify: kubeless function call get-node-url-tar-gz |egrep hello.world get-node-url-tar-bz2-verify: kubeless function call get-node-url-tar-bz2 |egrep hello.world get-node-url-tar-xz-verify: kubeless function call get-node-url-tar-xz |egrep hello.world scheduled-get-python: kubeless function deploy scheduled-get-python --schedule "* * * * *" --runtime python3.7 --handler helloget.foo --from-file python/helloget.py scheduled-get-python-verify: number="1"; \ timeout="70"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=scheduled-get-python`; \ logs=`kubectl logs $$pod | grep "GET / 
HTTP/1.1\" 200 11 \"\""`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found timeout-python: $(eval TMPDIR := $(shell mktemp -d)) printf 'def foo(event, context):\n%4swhile 1: pass\n%4sreturn "hello world"\n' > $(TMPDIR)/hello-loop.py kubeless function deploy timeout-python --runtime python3.7 --handler helloget.foo --from-file $(TMPDIR)/hello-loop.py --timeout 3 rm -rf $(TMPDIR) timeout-python-verify: $(eval MSG := $(shell kubeless function call timeout-python 2>&1 || true)) echo $(MSG) | egrep Request.timeout.exceeded get-nodejs: kubeless function deploy get-nodejs --runtime nodejs10 --handler helloget.foo --from-file nodejs/helloget.js get-nodejs-verify: kubeless function call get-nodejs |egrep hello.world get-nodejs-custom-port: kubeless function deploy get-nodejs-custom-port --runtime nodejs10 --handler helloget.foo --from-file nodejs/helloget.js --port 8083 get-nodejs-custom-port-verify: kubectl get svc get-nodejs-custom-port -o yaml | grep 'targetPort: 8083' kubeless function call get-nodejs-custom-port |egrep hello.world get-nodejs-stream: kubeless function deploy get-nodejs-stream --runtime nodejs10 --handler hellostream.foo --from-file nodejs/hellostream.js --dependencies nodejs/package.json get-nodejs-stream-verify: kubeless function call get-nodejs-stream |egrep hello.world timeout-nodejs: $(eval TMPDIR := $(shell mktemp -d)) printf 'module.exports = { foo: function (event, context) { while(true) {} } }\n' > $(TMPDIR)/hello-loop.js kubeless function deploy timeout-nodejs --runtime nodejs10 --handler helloget.foo --from-file $(TMPDIR)/hello-loop.js --timeout 4 rm -rf $(TMPDIR) timeout-nodejs-verify: $(eval MSG := $(shell kubeless function call timeout-nodejs 2>&1 || true)) echo $(MSG) | egrep Request.timeout.exceeded get-nodejs-deps: kubeless function deploy get-nodejs-deps --runtime nodejs10 --handler helloget.handler --from-file nodejs/hellowithdeps.js --dependencies nodejs/package.json 
get-nodejs-deps-verify:
	kubeless function call get-nodejs-deps --data '{"hello": "world"}' | grep -q 'hello.*world.*date.*UTC'

get-nodejs-multi:
	cd nodejs; zip helloFunctions.zip *js
	kubeless function deploy get-nodejs-multi --runtime nodejs10 --handler index.helloGet --from-file nodejs/helloFunctions.zip
	rm nodejs/helloFunctions.zip

get-nodejs-multi-verify:
	kubeless function call get-nodejs-multi |egrep hello.world

get-go:
	# NOTE(review): handler was "handler.Foo" but the sibling get-go-custom-port
	# target deploys the same golang/helloget.go with "helloget.Foo"; made
	# consistent here.
	kubeless function deploy get-go --runtime go1.14 --handler helloget.Foo --from-file golang/helloget.go

get-go-verify:
	kubeless function call get-go |egrep Hello.world

get-go-custom-port:
	kubeless function deploy get-go-custom-port --runtime go1.14 --handler helloget.Foo --from-file golang/helloget.go --port 8083

get-go-custom-port-verify:
	kubectl get svc get-go-custom-port -o yaml | grep 'targetPort: 8083'
	kubeless function call get-go-custom-port |egrep Hello.world

timeout-go:
	$(eval TMPDIR := $(shell mktemp -d))
	# NOTE(review): the temp file previously had a .js extension although it
	# holds Go source for the go1.14 runtime; renamed to .go.
	printf 'package kubeless\nimport "github.com/kubeless/kubeless/pkg/functions"\nfunc Foo(event functions.Event, context functions.Context) (string, error) {\nfor{\n}\nreturn "", nil\n}' > $(TMPDIR)/hello-loop.go
	kubeless function deploy timeout-go --runtime go1.14 --handler helloget.Foo --from-file $(TMPDIR)/hello-loop.go --timeout 4
	rm -rf $(TMPDIR)

timeout-go-verify:
	$(eval MSG := $(shell kubeless function call timeout-go 2>&1 || true))
	echo $(MSG) | egrep Request.timeout.exceeded

get-go-deps:
	kubeless function deploy get-go-deps --runtime go1.14 --handler helloget.Hello --from-file golang/hellowithdeps.go --dependencies golang/go.mod

get-go-deps-verify:
	kubeless function call get-go-deps --data '{"hello": "world"}'
	kubectl logs --tail=1000 -l function=get-go-deps | grep -q 'level=info msg=.*hello.*world'

post-go:
	kubeless function deploy post-go --runtime go1.14 --handler hellowithdata.Handler --from-file golang/hellowithdata.go

post-go-verify:
	kubeless function call post-go --data '{"it-s": "alive"}'| egrep "it.*alive"
	# Verify event context
	logs=`kubectl logs --tail=1000 -l function=post-go`; \
	echo $$logs | grep -q "it.*alive" && \
	echo $$logs | grep -q "Z" && \
	echo $$logs | grep -q "application/json" && \
	echo $$logs | grep -q "cli.kubeless.io"

get-python-metadata:
	kubeless function deploy get-python-metadata --runtime python3.7 --handler helloget.foo --from-file python/helloget.py --env foo:bar,bar=foo --memory 128Mi --label foo:bar,bar=foo,foobar

get-python-metadata-verify:
	kubeless function call get-python-metadata |egrep hello.world
	kubectl get po -o jsonpath='{.items[0].spec.containers[0].env}' -l function=get-python-metadata | grep '"name":"foo","value":"bar"'
	kubectl get po -o jsonpath='{.items[0].spec.containers[0].env}' -l function=get-python-metadata | grep '"name":"bar","value":"foo"'
	kubectl get po -o jsonpath='{.items[0].metadata.labels}' -l function=get-python-metadata | grep '"foo":"bar"'
	kubectl get po -o jsonpath='{.items[0].metadata.labels}' -l function=get-python-metadata | grep '"bar":"foo"'
	kubectl get po -o jsonpath='{.items[0].metadata.labels}' -l function=get-python-metadata | grep '"foobar":""'

get-python-secrets:
	kubectl create secret generic test-secret --from-literal=key=MY_KEY || true
	kubeless function deploy get-python-secrets --runtime python3.7 --handler helloget.foo --from-file python/helloget.py --secrets test-secret

get-python-secrets-verify:
	$(eval pod := $(shell kubectl get pod -l function=get-python-secrets -o go-template -o custom-columns=:metadata.name --no-headers=true))
	kubectl exec -it $(pod) cat /test-secret/key | egrep "MY_KEY"

get-ruby:
	kubeless function deploy get-ruby --runtime ruby2.4 --handler helloget.foo --from-file ruby/helloget.rb

get-ruby-verify:
	kubeless function call get-ruby |egrep hello.world

get-ruby-deps:
	kubeless function deploy get-ruby-deps --runtime ruby2.4 --handler hellowithdeps.foo --from-file ruby/hellowithdeps.rb --dependencies ruby/Gemfile

get-ruby-deps-verify:
	kubeless function call get-ruby-deps |egrep hello.world
get-ruby-custom-port:
	kubeless function deploy get-ruby-custom-port --runtime ruby2.4 --handler helloget.foo --from-file ruby/helloget.rb --port 8082

get-ruby-custom-port-verify:
	kubectl get svc get-ruby-custom-port -o yaml | grep 'targetPort: 8082'
	kubeless function call get-ruby-custom-port |egrep hello.world

get-php:
	kubeless function deploy get-php --runtime php7.2 --handler helloget.foo --from-file php/helloget.php

get-php-update:
	$(eval TMPDIR := $(shell mktemp -d))
	# NOTE(review): the PHP payload of this printf was lost in extraction
	# (angle-bracket content stripped); reconstructed below so the target
	# matches its -verify grep — TODO confirm against the original Makefile.
	printf '<?php function foo($$event, $$context) { return "hello world updated"; }' > $(TMPDIR)/hello-updated.php
	kubeless function update get-php --from-file $(TMPDIR)/hello-updated.php
	rm -rf $(TMPDIR)

get-php-update-verify:
	kubeless function call get-php | egrep "hello.world.updated"

get-php-verify:
	kubeless function call get-php | egrep "hello world"

get-php-deps:
	kubeless function deploy get-php-deps --runtime php7.2 --handler hellowithdeps.foo --from-file php/hellowithdeps.php --dependencies php/composer.json

get-php-deps-verify:
	kubeless function call get-php-deps &> /dev/null
	kubectl logs --tail=1000 -l function=get-php-deps | egrep "Hello"

get-php-deps-update:
	$(eval TMPDIR := $(shell mktemp -d))
	sed "s/1\.23/1\.20/" php/composer.json > $(TMPDIR)/composer.json
	kubeless function update get-php-deps --dependencies $(TMPDIR)/composer.json

get-php-deps-update-verify:
	$(eval pod := $(shell kubectl get pod -l function=get-php-deps -o go-template -o custom-columns=:metadata.name --no-headers=true))
	kubectl exec -it $(pod) cat /kubeless/composer.json | egrep "1.20"

post-php:
	kubeless function deploy post-php --runtime php7.2 --handler hellowithdata.foo --from-file php/hellowithdata.php

post-php-verify:
	kubeless function call post-php --data '{"it-s": "alive"}'| egrep "it.*alive"

timeout-php:
	$(eval TMPDIR := $(shell mktemp -d))
	# NOTE(review): payload lost in extraction; reconstructed as an infinite
	# loop so the function exceeds the 4s timeout — TODO confirm.
	printf '<?php function foo($$event, $$context) { while (true) {} }' > $(TMPDIR)/hello-loop.php
	kubeless function deploy timeout-php --runtime php7.2 --handler helloget.foo --from-file $(TMPDIR)/hello-loop.php --timeout 4
	rm -rf $(TMPDIR)

timeout-php-verify:
	$(eval MSG := $(shell kubeless function call timeout-php 2>&1 || true))
	echo $(MSG) | egrep Request.timeout.exceeded

timeout-ruby:
	$(eval TMPDIR := $(shell mktemp -d))
	printf 'def foo(event, context)\n%4swhile true do;sleep(1);end\n%4s"hello world"\nend' > $(TMPDIR)/hello-loop.rb
	kubeless function deploy timeout-ruby --runtime ruby2.4 --handler helloget.foo --from-file $(TMPDIR)/hello-loop.rb --timeout 4
	rm -rf $(TMPDIR)

timeout-ruby-verify:
	$(eval MSG := $(shell { time kubeless function call timeout-ruby; } 2>&1 || true))
	echo $(MSG) | egrep Request.timeout.exceeded
	echo $(MSG) | egrep "real\s*0m4."

get-dotnetcore:
	kubeless function deploy get-dotnetcore --runtime dotnetcore2.0 --handler module.handler --from-file dotnetcore/helloget.cs

get-dotnetcore-verify:
	kubeless function call get-dotnetcore |egrep hello.world
	kubeless function top --function get-dotnetcore --out yaml |egrep "Function does not expose metrics"

get-dotnetcore-dependency:
	kubeless function deploy get-dotnetcore-dependency --runtime dotnetcore2.0 --handler module.handler --from-file dotnetcore/dependency-yaml.cs --dependencies dotnetcore/dependency-yaml.csproj

get-dotnetcore-dependency-verify:
	kubeless function call get-dotnetcore-dependency |egrep Name:\ Michael

custom-get-python:
	kubeless function deploy --runtime-image kubeless/get-python-example@sha256:6a14400f14e26d46a971445b7a850af533fe40cb75a67297283bdf536e09ca5e custom-get-python

custom-get-python-verify:
	kubeless function call custom-get-python |egrep hello.world

custom-get-python-update:
	kubeless function update --runtime-image kubeless/get-python-example@sha256:174beab98e6fa454e21121302395375e90a324e9276367296aab0eb5b4aa8922 custom-get-python

custom-get-python-update-verify:
	kubeless function call custom-get-python |egrep hello.world.updated

get: get-python get-nodejs get-python-metadata get-ruby get-ruby-deps get-python-custom-port

post-python:
	kubeless function deploy post-python --runtime python3.7 --handler hellowithdata.handler --from-file python/hellowithdata.py
post-python-verify: kubeless function call post-python --data '{"it-s": "alive"}'|egrep "it.*alive" # Verify event context logs=`kubectl logs --tail=1000 -l function=post-python`; \ echo $$logs | grep -q "it.*alive" && \ echo $$logs | grep -q "event-time.*Z" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*cli.kubeless.io" && \ echo $$logs | grep -q "event-id.*" post-python-custom-port: kubeless function deploy post-python-custom-port --runtime python3.7 --handler hellowithdata.handler --from-file python/hellowithdata.py --port 8081 post-python-custom-port-verify: kubectl get svc post-python-custom-port -o yaml | grep 'targetPort: 8081' kubeless function call post-python-custom-port --data '{"it-s": "alive"}'|egrep "it.*alive" post-nodejs: kubeless function deploy post-nodejs --runtime nodejs10 --handler hellowithdata.handler --from-file nodejs/hellowithdata.js post-nodejs-verify: kubeless function call post-nodejs --data '{"it-s": "alive"}'|egrep "it.*alive" # Verify event context logs=`kubectl logs --tail=1000 -l function=post-nodejs`; \ echo $$logs | grep -q "it.*alive" && \ echo $$logs | grep -q "event-time.*Z" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*cli.kubeless.io" && \ echo $$logs | grep -q "event-id.*" post-ruby: kubeless function deploy post-ruby --runtime ruby2.4 --handler hellowithdata.handler --from-file ruby/hellowithdata.rb post-ruby-verify: kubeless function call post-ruby --data '{"it-s": "alive"}'|egrep "it.*alive" # Verify event context logs=`kubectl logs --tail=1000 -l function=post-ruby`; \ echo $$logs | grep -q "it.*alive" && \ echo $$logs | grep -q "event-time.*Z" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*cli.kubeless.io" && \ echo $$logs | grep -q "event-id.*" post-dotnetcore: kubeless function deploy post-dotnetcore --runtime dotnetcore2.0 --handler module.handler 
--from-file dotnetcore/hellowithdata.cs post-dotnetcore-verify: kubeless function call post-dotnetcore --data '{"it-s": "alive"}'|egrep "it.*alive" post: post-python post-nodejs post-ruby post-python-custom-port pubsub-python: kubeless topic create s3-python || true kubeless function deploy pubsub-python --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py kubeless trigger kafka create pubsub-python --function-selector created-by=kubeless,function=pubsub-python --trigger-topic s3-python # Generate a random string to inject into s3 topic, # then "tail -f" until it shows (with timeout) pubsub-python-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-python --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-python`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=pubsub-python`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*kafkatriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" python-nats: kubeless function deploy python-nats --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py kubeless trigger nats create python-nats --function-selector created-by=kubeless,function=python-nats --trigger-topic test python-nats-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) $(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic test --message '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ 
$$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=python-nats`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=python-nats`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*natstriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" python-kinesis: kubeless function deploy python-kinesis --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py $(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubectl create secret generic ec2 --from-literal=aws_access_key_id=kinesalite --from-literal=aws_secret_access_key=kinesalite kubeless trigger kinesis create-stream --aws-region kinesalite --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT) --shard-count 1 --stream-name kubeless-stream kubeless trigger kinesis create kinesis-trigger --function-name python-kinesis --aws-region kinesalite --shard-id shardId-000000000000 --stream kubeless-stream --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT) python-kinesis-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) $(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger kinesis publish --aws-region kinesalite --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT) --partition-key key1 --stream kubeless-stream --records '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=python-kinesis`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr 
$$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=python-kinesis`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*kinesistriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" python-kinesis-multi-record: kubeless function deploy python-kinesis-multi-record --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py $(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger kinesis create kinesis-trigger-mr --function-name python-kinesis-multi-record --aws-region kinesalite --shard-id shardId-000000000000 --stream kubeless-stream --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT) python-kinesis-multi-record-verify: $(eval DATA1 := $(shell mktemp -u -t XXXXXXXX)) $(eval DATA2 := $(shell mktemp -u -t XXXXXXXX)) $(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger kinesis publish --aws-region kinesalite --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT) --partition-key key1 --stream kubeless-stream --records '{"payload":"$(DATA1)"}' --records '{"payload":"$(DATA2)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=python-kinesis-multi-record`; \ logs1=`kubectl logs $$pod | grep $(DATA1)`; \ logs2=`kubectl logs $$pod | grep $(DATA2)`; \ if [ "$$logs1" != "" ] && [ "$$logs2" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=python-kinesis-multi-record`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q 
"event-namespace.*kinesistriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" nats-python-func1-topic-test: kubeless function deploy nats-python-func1-topic-test --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label topic=nats-topic-test nats-python-func2-topic-test: kubeless function deploy nats-python-func2-topic-test --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label topic=nats-topic-test nats-python-func-multi-topic: kubeless function deploy nats-python-func-multi-topic --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label func=nats-python-func-multi-topic nats-python-trigger-topic-test: kubeless trigger nats create nats-python-trigger-topic-test --function-selector created-by=kubeless,topic=nats-topic-test --trigger-topic topic-test nats-python-trigger-topic1: kubeless trigger nats create nats-python-trigger-topic1 --function-selector created-by=kubeless,func=nats-python-func-multi-topic --trigger-topic topic1 nats-python-trigger-topic2: kubeless trigger nats create nats-python-trigger-topic2 --function-selector created-by=kubeless,func=nats-python-func-multi-topic --trigger-topic topic2 nats-python-func1-topic-test-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) $(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic-test --message '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=nats-python-func1-topic-test`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=nats-python-func1-topic-test`; \ echo 
$$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*natstriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" nats-python-func2-topic-test-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) $(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic-test --message '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=nats-python-func2-topic-test`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=nats-python-func2-topic-test`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*natstriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" nats-python-func-multi-topic-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) $(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath="{.spec.ports[0].nodePort}")) $(eval MINIKUBE_IP := $(shell minikube ip)) kubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic1 --message '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=nats-python-func-multi-topic`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=nats-python-func-multi-topic`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo 
$$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*natstriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" kubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic2 --message '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=nats-python-func-multi-topic`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found # Verify event context logs=`kubectl logs --tail=1000 -l function=nats-python-func-multi-topic`; \ echo $$logs | grep -q "event-time.*UTC" && \ echo $$logs | grep -q "event-type.*application/json" && \ echo $$logs | grep -q "event-namespace.*natstriggers.kubeless.io" && \ echo $$logs | grep -q "event-id.*" kafka-python-func1-topic-s3-python: kubeless topic create s3-python || true kubeless function deploy kafka-python-func1-topic-s3-python --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label topic=s3-python kafka-python-func1-topic-s3-python-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-python --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=kafka-python-func1-topic-s3-python`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found kafka-python-func2-topic-s3-python: kubeless topic create s3-python || true kubeless function deploy kafka-python-func2-topic-s3-python --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label topic=s3-python kafka-python-func2-topic-s3-python-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic 
s3-python --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=kafka-python-func2-topic-s3-python`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found s3-python-kafka-trigger: kubeless trigger kafka create s3-python-kafka-trigger --function-selector created-by=kubeless,topic=s3-python --trigger-topic s3-python pubsub-python34: kubeless topic create s3-python34 || true kubeless function deploy pubsub-python34 --runtime python3.4 --handler pubsub-python.handler --from-file python/hellowithdata34.py kubeless trigger kafka create pubsub-python34 --function-selector created-by=kubeless,function=pubsub-python34 --trigger-topic s3-python34 pubsub-python34-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-python34 --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-python34`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found pubsub-python36: kubeless topic create s3-python36 || true kubeless function deploy pubsub-python36 --runtime python3.6 --handler pubsub-python.handler --from-file python/pubsub.py kubeless trigger kafka create pubsub-python36 --function-selector created-by=kubeless,function=pubsub-python36 --trigger-topic s3-python36 pubsub-python36-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-python36 --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-python36`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ 
break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found pubsub-nodejs: kubeless topic create s3-nodejs || true kubeless function deploy pubsub-nodejs --runtime nodejs10 --handler pubsub-nodejs.handler --from-file nodejs/hellowithdata.js kubeless trigger kafka create pubsub-nodejs --function-selector created-by=kubeless,function=pubsub-nodejs --trigger-topic s3-nodejs pubsub-nodejs-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-nodejs --data '{"test": "$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-nodejs`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found pubsub-nodejs-update: kubeless topic create s3-nodejs-2 || true kubeless trigger kafka update pubsub-nodejs --trigger-topic s3-nodejs-2 pubsub-nodejs-update-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-nodejs-2 --data '{"test": "$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-nodejs`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found pubsub-ruby: kubeless topic create s3-ruby || true kubeless function deploy pubsub-ruby --runtime ruby2.4 --handler pubsub-ruby.handler --from-file ruby/hellowithdata.rb kubeless trigger kafka create pubsub-ruby --function-selector created-by=kubeless,function=pubsub-ruby --trigger-topic s3-ruby pubsub-ruby-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-ruby --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-ruby`; \ 
logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found pubsub-go: kubeless topic create s3-go || true kubeless function deploy pubsub-go --runtime go1.14 --handler pubsub-go.Handler --from-file golang/hellowithdata.go kubeless trigger kafka create pubsub-go --function-selector created-by=kubeless,function=pubsub-go --trigger-topic s3-go pubsub-go-verify: $(eval DATA := $(shell mktemp -u -t XXXXXXXX)) kubeless topic publish --topic s3-go --data '{"payload":"$(DATA)"}' number="1"; \ timeout="60"; \ found=false; \ while [ $$number -le $$timeout ] ; do \ pod=`kubectl get po -oname -l function=pubsub-go`; \ logs=`kubectl logs $$pod | grep $(DATA)`; \ if [ "$$logs" != "" ]; then \ found=true; \ break; \ fi; \ sleep 1; \ number=`expr $$number + 1`; \ done; \ $$found pubsub: pubsub-python pubsub-nodejs pubsub-ruby get-java: kubeless function deploy get-java --runtime java1.8 --handler Foo.foo --from-file java/HelloGet.java get-java-verify: kubeless function call get-java |egrep Hello.world post-java: kubeless function deploy post-java --runtime java1.8 --handler Foo.foo --from-file java/HelloWithData.java post-java-verify: kubeless function call post-java --data '{"its": "alive"}'| egrep "its.*alive" get-java-deps: kubeless function deploy get-java-deps --runtime java1.8 --handler Hello.sayHello --from-file java/HelloWithDeps.java --dependencies java/pom.xml get-java-deps-verify: kubeless function call get-java-deps --data '{"hello": "world"}' kubectl logs --tail=1000 -l function=get-java-deps | grep -q '.*Hello.*world! 
Current local time is:' get-jvm-java: kubeless function deploy get-jvm-java --runtime jvm1.8 --from-file jvm/java/test-java-jvm.jar --handler io_ino_Handler.sayHello get-jvm-java-verify: kubeless function call get-jvm-java | grep "Hello world" get-nodejs-distroless: kubeless function deploy get-nodejs-distroless --runtime nodejs_distroless8 --handler helloget.foo --from-file nodejs/helloget.js get-nodejs-distroless-verify: kubeless function call get-nodejs-distroless |egrep hello.world get-nodejs-distroless-deps: kubeless function deploy get-nodejs-distroless-deps --runtime nodejs_distroless8 --handler helloget.handler --from-file nodejs/hellowithdeps.js --dependencies nodejs/package.json get-nodejs-distroless-deps-verify: kubeless function call get-nodejs-distroless-deps --data '{"hello": "world"}' | grep -q 'hello.*world.*date.*UTC' get-ballerina: kubeless function deploy get-ballerina --runtime ballerina0.981.0 --from-file ballerina/helloget.bal --handler helloget.foo get-ballerina-verify: kubeless function call get-ballerina |egrep Hello.World.Ballerina get-ballerina-custom-port: kubeless function deploy get-ballerina-custom-port --runtime ballerina0.981.0 --handler helloget.foo --from-file ballerina/helloget.bal --port 8083 get-ballerina-custom-port-verify: kubectl get svc get-ballerina-custom-port -o yaml | grep 'targetPort: 8083' kubeless function call get-ballerina-custom-port |egrep Hello.World.Ballerina get-ballerina-data: kubeless function deploy get-ballerina-data --runtime ballerina0.981.0 --from-file ballerina/hellowithdata.bal --handler hellowithdata.foo get-ballerina-data-verify: kubeless function call get-ballerina-data --data '{"hello":"world"}' |egrep hello get-ballerina-conf: zip -r -j ballerina/bar.zip ballerina/hello_with_conf/ kubeless function deploy get-ballerina-conf --runtime ballerina0.981.0 --from-file ballerina/bar.zip --handler hello_with_conf.bar rm ballerina/bar.zip get-ballerina-conf-verify: kubeless function call 
get-ballerina-conf | egrep john

================================================
FILE: examples/README.md
================================================
# Examples

This directory contains basic examples for kubeless. Specifically it contains
examples that we can test quickly using the `Makefile`. Some of these examples
are run during our integration tests.

Check the [Makefile](Makefile). Then run some of the examples like so:

```
make post-python
```

Or a different runtime:

```
make post-dotnetcore
```

Or a PubSub example:

```
make pubsub-python
```

# Looking for more function examples?

You can find more examples at [https://github.com/kubeless/functions](https://github.com/kubeless/functions)

================================================
FILE: examples/ballerina/hello_with_conf/hello_with_conf.bal
================================================
import kubeless/kubeless;
import ballerina/io;
import ballerina/config;

// Prints the event and context, then returns the "hello.userid" value
// configured in kubeless.toml.
public function bar(kubeless:Event event, kubeless:Context context) returns (string|error) {
    io:println(event);
    io:println(context);
    return config:getAsString("hello.userid");
}

================================================
FILE: examples/ballerina/hello_with_conf/kubeless.toml
================================================
[hello]
userid="john@ballerina.com"

================================================
FILE: examples/ballerina/helloget.bal
================================================
import kubeless/kubeless;

// Minimal GET handler: ignores the event and returns a constant greeting.
public function foo(kubeless:Event event, kubeless:Context context) returns (string|error) {
    return "Hello World Ballerina";
}

================================================
FILE: examples/ballerina/hellowithdata.bal
================================================
import kubeless/kubeless;
import ballerina/io;

// Echo handler: prints the event and context, then returns the payload.
public function foo(kubeless:Event event, kubeless:Context context) returns (string|error) {
    io:println(event);
    io:println(context);
    return event.data;
}

================================================
FILE:
examples/dotnetcore/dependency-yaml.cs
================================================
using System;
using Kubeless.Functions;
using YamlDotNet.Serialization;

// Serializes a sample Person to YAML to exercise the YamlDotNet dependency.
public class module
{
    public string handler(Event k8Event, Context k8Context)
    {
        var person = new Person() { Name = "Michael J. Fox", Age = 56 };
        var serializer = new SerializerBuilder().Build();
        return serializer.Serialize(person); // yaml
    }
}

public class Person
{
    public string Name { get; set; }
    public int Age { get; set; }
}

================================================
FILE: examples/dotnetcore/dependency-yaml.csproj
================================================
<!-- NOTE(review): the XML markup of this csproj was lost in extraction (only
     the text node "netstandard2.0" survived); reconstructed below, including
     the YamlDotNet reference the .cs file requires - TODO confirm against the
     original file. -->
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>netstandard2.0</TargetFramework>
  </PropertyGroup>
  <ItemGroup>
    <PackageReference Include="YamlDotNet" Version="5.0.1" />
  </ItemGroup>
</Project>

================================================
FILE: examples/dotnetcore/fibonacci.cs
================================================
using System;
using Kubeless.Functions;

// Computes fibonacci(n), where n is parsed from the event payload.
public class module
{
    public int handler(Event k8Event, Context k8Context)
    {
        var n = int.Parse(k8Event.Data.ToString());
        return fibonacci(n);
    }

    // Iterative Fibonacci: returns the same values as the previous naive
    // recursive version for n >= 0, but in O(n) time instead of O(2^n), and
    // fails fast on negative input (the recursive version recursed without
    // bound and overflowed the stack).
    public int fibonacci(int n)
    {
        if (n < 0)
            throw new ArgumentOutOfRangeException(nameof(n), "n must be non-negative");
        int prev = 0, curr = 1;
        for (int i = 0; i < n; i++)
        {
            int next = prev + curr;
            prev = curr;
            curr = next;
        }
        return prev;
    }
}

================================================
FILE: examples/dotnetcore/fibonacci.csproj
================================================
<!-- NOTE(review): XML lost in extraction; reconstructed - TODO confirm. -->
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>netstandard2.0</TargetFramework>
  </PropertyGroup>
</Project>

================================================
FILE: examples/dotnetcore/helloget.cs
================================================
using System;
using Kubeless.Functions;

// Minimal GET handler: returns a constant greeting.
public class module
{
    public string handler(Event k8Event, Context k8Context)
    {
        return "hello world";
    }
}

================================================
FILE: examples/dotnetcore/helloget.csproj
================================================
<!-- NOTE(review): XML lost in extraction; reconstructed - TODO confirm. -->
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>netstandard2.0</TargetFramework>
  </PropertyGroup>
</Project>

================================================
FILE: examples/dotnetcore/hellowithdata.cs
================================================
using System;
using Kubeless.Functions;

// Echo handler: returns the request payload unchanged.
public class module
{
    public object handler(Event k8Event, Context k8Context)
    {
        return k8Event.Data;
    }
}
================================================ FILE: examples/dotnetcore/hellowithdata.csproj ================================================ netstandard2.0 ================================================ FILE: examples/golang/go.mod ================================================ module function go 1.14 require ( github.com/sirupsen/logrus v1.6.0 ) ================================================ FILE: examples/golang/helloget.go ================================================ package kubeless import ( "github.com/kubeless/kubeless/pkg/functions" ) // Foo sample function func Foo(event functions.Event, context functions.Context) (string, error) { return "Hello world!", nil } ================================================ FILE: examples/golang/hellowithdata.go ================================================ package kubeless import ( "fmt" "github.com/kubeless/kubeless/pkg/functions" ) // Handler sample function with data func Handler(event functions.Event, context functions.Context) (string, error) { fmt.Println(event) return event.Data, nil } ================================================ FILE: examples/golang/hellowithdeps.go ================================================ package kubeless import ( "github.com/kubeless/kubeless/pkg/functions" "github.com/sirupsen/logrus" ) // Hello sample function with dependencies func Hello(event functions.Event, context functions.Context) (string, error) { logrus.Info(event.Data) return "Hello world!", nil } ================================================ FILE: examples/java/HelloGet.java ================================================ package io.kubeless; import io.kubeless.Event; import io.kubeless.Context; public class Foo { public String foo(io.kubeless.Event event, io.kubeless.Context context) { return "Hello world!"; } } ================================================ FILE: examples/java/HelloWithData.java ================================================ package io.kubeless; import io.kubeless.Event; 
import io.kubeless.Context; public class Foo { public String foo(io.kubeless.Event event, io.kubeless.Context context) { System.out.println(event.Data); return event.Data; } } ================================================ FILE: examples/java/HelloWithDeps.java ================================================ package io.kubeless; import io.kubeless.Event; import io.kubeless.Context; import org.joda.time.LocalTime; public class Hello { public String sayHello(io.kubeless.Event event, io.kubeless.Context context) { System.out.println(event.Data); LocalTime currentTime = new LocalTime(); return "Hello world! Current local time is: " + currentTime; } } ================================================ FILE: examples/java/pom.xml ================================================ 4.0.0 function function 1.0-SNAPSHOT joda-time joda-time 2.9.2 io.kubeless params 1.0-SNAPSHOT io.kubeless kubeless 1.0-SNAPSHOT ================================================ FILE: examples/jvm/Readme.md ================================================ # JVM examples These are examples to run compiled JVM code in kubeless. They should serve as a template to be able to use other languages. ================================================ FILE: examples/jvm/java/Readme.md ================================================ # Java on runtime JVM `gradle shadowJar` Build the jar with all deps `kubeless function deploy test --runtime jvm1.8 --from-file build/libs/jvm-test-0.1-all.jar --handler io_ino_Handler.sayHello` The package name uses `_` instead of `.` for the path. 
================================================ FILE: examples/jvm/java/build.gradle ================================================ buildscript { repositories { jcenter() } dependencies { classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4' } } apply plugin: 'java' apply plugin: 'com.github.johnrengelman.shadow' version = '0.1' jar { manifest { attributes 'Implementation-Title': 'jvm-test', 'Implementation-Version': version } } repositories { mavenCentral() } dependencies { compile group: 'log4j', name: 'log4j', version: '1.2.17' compile group: 'de.inoio.kubeless', name: 'jvm-runtime', version: '0.1' testCompile group: 'junit', name: 'junit', version: '4.+' } ================================================ FILE: examples/jvm/java/src/main/java/io/ino/Handler.java ================================================ package io.ino; public class Handler { public String sayHello(io.kubeless.Event event, io.kubeless.Context context) { System.out.println(event.toString()); return "Hello world! AFDFCH"; } } ================================================ FILE: examples/jvm/scala/Readme.md ================================================ # Scala on runtime JVM !! WIP the jar-file is too large for the storage backend, you have to pass a URL to the jar file. `sbt assembly` Build the jar with all deps `kubeless function deploy testscala --runtime jvm1.8 --from-file target/scala-2.12/scala-test.jar --handler de_inoio_Handler.fooBar` The package name uses `_` instead of `.` for the path. 
================================================ FILE: examples/jvm/scala/build.sbt ================================================ assemblyJarName in assembly := "scala-test.jar" organization := "de.inoio" scalaVersion := "2.12.1" libraryDependencies += "de.inoio.kubeless" % "jvm-runtime" % "0.1" ================================================ FILE: examples/jvm/scala/project/assembly.sbt ================================================ addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.7") ================================================ FILE: examples/jvm/scala/project/build.properties ================================================ sbt.version=0.13.15 ================================================ FILE: examples/jvm/scala/src/main/scala/de/inoio/Handler.scala ================================================ package de.inoio import io.kubeless.{Context, Event} class Handler { def fooBar(event: Event, context: Context): String = { "FOO Bar aus Hamburg" } } ================================================ FILE: examples/nodejs/function.yaml ================================================ --- apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: hello spec: handler: handler.hello runtime: nodejs6 function: | module.exports = { hello: function(event, context) { return 'Hello World' } } ================================================ FILE: examples/nodejs/function1.yaml ================================================ --- apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: hello spec: handler: handler.foobar runtime: nodejs8 function: | module.exports = { foobar: function (event, context) { return(event.data) } } ================================================ FILE: examples/nodejs/helloget.js ================================================ module.exports = { foo: function (event, context) { return 'hello world!'; } } ================================================ FILE: examples/nodejs/hellostream.js 
================================================ const from = require('from2'); const eos = require('end-of-stream'); function fromString(string) { return from(function(size, next) { if (string.length <= 0) return next(null, null); const chunk = string.slice(0, size); string = string.slice(size); next(null, chunk); }); } module.exports = { foo: function(event, context) { return new Promise((resolve, reject) => { const {response} = event.extensions; const stream = fromString('hello world!'); eos(stream, err => err ? reject(err) : resolve(stream)); response.setHeader('Content-Type', 'text/event-stream; charset=utf-8'); stream.pipe(response); }); } } ================================================ FILE: examples/nodejs/hellowithdata.js ================================================ module.exports = { handler: (event, context) => { console.log(event); return event.data; }, }; ================================================ FILE: examples/nodejs/hellowithdeps.js ================================================ 'use strict'; const _ = require('lodash'); module.exports = { handler: (event, context) => { _.assign(event.data, {date: new Date().toTimeString()}) return JSON.stringify(event.data); }, }; ================================================ FILE: examples/nodejs/index.js ================================================ 'use strict'; module.exports = { helloGet: require('./helloget').foo, helloWithData: require('./hellowithdata').handler, } ================================================ FILE: examples/nodejs/package.json ================================================ { "name": "hellowithdeps", "version": "0.0.1", "dependencies": { "end-of-stream": "^1.4.1", "from2": "^2.3.0", "lodash": "^4.17.5" } } ================================================ FILE: examples/php/composer.json ================================================ { "require": { "monolog/monolog": "^1.23" } } ================================================ FILE: examples/php/helloget.php 
================================================ data); } ================================================ FILE: examples/php/hellowithdeps.php ================================================ pushHandler(new StreamHandler("php://stdout", Logger::INFO)); // add records to the log $log->info('Hello'); $log->info('World'); return "hello world"; } ================================================ FILE: examples/python/Dockerfile ================================================ # Create a custom image with a python function FROM kubeless/python@sha256:565bebecb08d9a7b804c588105677a3572f10ff2032cef7727975061a653fb98 ENV FUNC_HANDLER=foo \ MOD_NAME=helloget ADD helloget.py / RUN mkdir -p /kubeless/ RUN chown 1000:1000 /kubeless ENTRYPOINT [ "bash", "-c", "cp /helloget.py /kubeless/ && python3.7 /kubeless.py"] ================================================ FILE: examples/python/function.yaml ================================================ --- apiVersion: k8s.io/v1 kind: Function metadata: name: function spec: handler: hello.handler runtime: python3.7 function: | import json def handler(): return "hello world" ================================================ FILE: examples/python/function1.yaml ================================================ --- apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: function1 spec: handler: hello.foobar runtime: python3.7 deps: | cowpy function: | import time import random from cowpy import cow def foobar(): # NB: delay will be negative and sleep will raise an error # occasionally. This is a feature for demoing errors. 
delay = random.normalvariate(0.3, 0.2) time.sleep(delay) msg = "hello world - with a %0.2fs artificial delay" % delay c = cow.get_cow() return c().milk(msg) ================================================ FILE: examples/python/helloget.py ================================================ def foo(event, context): return "hello world" ================================================ FILE: examples/python/hellowithdata.py ================================================ def handler(event, context): print(event) return event['data'] ================================================ FILE: examples/python/hellowithdeps.py ================================================ from hellowithdepshelper import foo ================================================ FILE: examples/python/hellowithdepshelper.py ================================================ from bs4 import BeautifulSoup import urllib.request def foo(event, context): page = urllib.request.urlopen("https://www.google.com/").read() soup = BeautifulSoup(page, 'html.parser') return soup.title.string ================================================ FILE: examples/python/requirements.txt ================================================ bs4 ================================================ FILE: examples/ruby/Gemfile ================================================ source 'https://rubygems.org' gem 'logging' ================================================ FILE: examples/ruby/function.yaml ================================================ --- apiVersion: kubeless.io/v1beta1 kind: Function metadata: name: function spec: handler: test.run runtime: ruby2.4 function: | # Obtains the latest Kubeless release published def run(event, context) require "net/https" require "uri" require "json" # Fetch release info uri = URI.parse("https://api.github.com/repos/kubeless/kubeless/releases") http = Net::HTTP.new(uri.host, uri.port) request = Net::HTTP::Get.new(uri.request_uri) http.use_ssl = true response = http.request(request) # Parse 
response output = JSON.parse(response.body) # Create a Hash for output output_hash = { version: output.first['name'] } # Print the stuff (JSON) puts JSON.pretty_generate(output_hash) return output_hash[:version] end ================================================ FILE: examples/ruby/helloget.rb ================================================ def foo(event, context) "hello world" end ================================================ FILE: examples/ruby/hellowithdata.rb ================================================ def handler(event, context) puts event JSON.generate(event[:data]) end ================================================ FILE: examples/ruby/hellowithdeps.rb ================================================ require 'logging' def foo(event, context) logging = Logging.logger(STDOUT) logging.info "it works!" "hello world" end ================================================ FILE: examples/ruby/latest.rb ================================================ # Obtains the latest Kubeless release published def handler(event, context) require "net/https" require "uri" require "json" # Fetch release info uri = URI.parse("https://api.github.com/repos/kubeless/kubeless/releases") http = Net::HTTP.new(uri.host, uri.port) request = Net::HTTP::Get.new(uri.request_uri) http.use_ssl = true response = http.request(request) # Parse response output = JSON.parse(response.body) # Create a Hash for output output_hash = { version: output.first['name'] } # Print the stuff (JSON) puts JSON.pretty_generate(output_hash) end ================================================ FILE: go.mod ================================================ module github.com/kubeless/kubeless go 1.12 require ( github.com/Azure/go-autorest v8.0.0+incompatible // indirect github.com/aws/aws-sdk-go v1.16.26 github.com/coreos/prometheus-operator v0.0.0-20171201110357-197eb012d973 github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/googleapis/gnostic v0.2.0 // indirect github.com/gophercloud/gophercloud v0.0.0-20190130105114-cc9c99918988 // indirect github.com/gosuri/uitable v0.0.0-20160404203958-36ee7e946282 github.com/imdario/mergo v0.3.7 github.com/kubeless/cronjob-trigger v1.0.2 github.com/kubeless/http-trigger v1.0.0 github.com/kubeless/kafka-trigger v1.0.1 github.com/kubeless/kinesis-trigger v0.0.0-20180817123215-a548c3d1cbd9 github.com/kubeless/nats-trigger v0.0.0-20180817123246-372a5fa547dc github.com/mattn/go-runewidth v0.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/nats-io/gnatsd v1.4.1 // indirect github.com/nats-io/go-nats v1.7.0 github.com/nats-io/nkeys v0.0.2 // indirect github.com/nats-io/nuid v1.0.0 // indirect github.com/pkg/errors v0.8.1 // indirect github.com/prometheus/client_golang v0.9.3 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect github.com/prometheus/common v0.4.0 github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967 github.com/sirupsen/logrus v1.2.0 github.com/spf13/cobra v1.1.1 golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d // indirect golang.org/x/net v0.0.0-20190620200207-3b0461eec859 gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/api v0.0.0-20180308224125-73d903622b73 k8s.io/apiextensions-apiserver v0.0.0-20180327033742-750feebe2038 k8s.io/apimachinery v0.0.0-20180228050457-302974c03f7e k8s.io/client-go v7.0.0+incompatible ) ================================================ FILE: go.sum ================================================ cloud.google.com/go v0.0.0-20160913182117-3b1ae45394a2/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.35.1 h1:LMe/Btq0Eijsc97JyBwMc0KMXOe0orqAMdg7/EkywN8= cloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/go-autorest v8.0.0+incompatible 
h1:lgmv/yX7Zgt1TJEYG8DHCqc0zw5FkYevByNVIm77JNM= github.com/Azure/go-autorest v8.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.16.26 h1:GWkl3rkRO/JGRTWoLLIqwf7AWC4/W/1hMOUZqmX0js4= github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/prometheus-operator v0.0.0-20171201110357-197eb012d973 h1:7a78CgFQnnKoQomLoxGgKMaUp7QO9amd/IrifrECbmY= github.com/coreos/prometheus-operator v0.0.0-20171201110357-197eb012d973/go.mod h1:SO+r5yZUacDFPKHfPoUjI3hMsH+ZUdiuNNhuSq3WoSg= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod 
h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod 
h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190130105114-cc9c99918988 h1:fajr0WpQtCjYtwtH5zivs/sXvMcPcT/ebx+HdyD11NA= github.com/gophercloud/gophercloud v0.0.0-20190130105114-cc9c99918988/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.0-20160404203958-36ee7e946282 h1:KFqmdzEPbU7Uck2tn50t+HQXZNVkxe8M9qRb/ZoSHaE= github.com/gosuri/uitable v0.0.0-20160404203958-36ee7e946282/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod 
h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0= github.com/howeyc/gopass 
v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.0.0-20180119215619-163f41321a19/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20170829155851-36b14963da70/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ratelimit v0.0.0-20170523012141-5b9ff8664717/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kubeless/cronjob-trigger v1.0.2 h1:/hAkCMY7dTeP8oPo2lmPWmuEdQVcydPmUc10EfwGYaQ= github.com/kubeless/cronjob-trigger v1.0.2/go.mod h1:Ktn0pfVcg2EG6XoV7MNBlsiyKm/RyUu87oRqdpMR1qM= github.com/kubeless/http-trigger v1.0.0 h1:CciPHVu1Rf8oi67GOdMmhySILHYxxQDndiDDm+VoYfw= github.com/kubeless/http-trigger v1.0.0/go.mod h1:a3DdjXl1CXccRLyiM4BeYpwW4Pt6q2viEm0mI6Wvaps= github.com/kubeless/kafka-trigger v1.0.1 h1:XcKCe92i/+hww8fb2gNNAgCBKQNzuing9h97d1G8Jeg= github.com/kubeless/kafka-trigger v1.0.1/go.mod h1:giDA+x4a/T6o0vWhHvZkas6N4B/cMjOv7fb3hnorMUI= github.com/kubeless/kinesis-trigger v0.0.0-20180817123215-a548c3d1cbd9 h1:D+VuPkR46FGkP2dvH49fTF0dF/+Kz98H3Wy9BD3+ZGg= github.com/kubeless/kinesis-trigger v0.0.0-20180817123215-a548c3d1cbd9/go.mod h1:Zz4cU6vaCS71yy+DpAx3/Y2HeV0RsJ3f+/5WCSeYq24= github.com/kubeless/kubeless v1.0.0-alpha.6/go.mod h1:eBSqNpFBgiemDH1gmDcIndBDbGgoZJobww4ZaFK9N1k= github.com/kubeless/nats-trigger v0.0.0-20180817123246-372a5fa547dc 
h1:64KDKAkb6xOt+Os36M9nblvBdWpMKNsQdc542wurU0E= github.com/kubeless/nats-trigger v0.0.0-20180817123246-372a5fa547dc/go.mod h1:VgX8QhZAcW/DUMyMZbdfodjzLfZCZwSxd2q8XKNi1rs= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v0.0.0-20150406173934-fc2b8d3a73c4/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure 
v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44= github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ= github.com/nats-io/go-nats v1.7.0 h1:oQOfHcLr8hb43QG8yeVyY2jtarIaTjOv41CGdF3tTvQ= github.com/nats-io/go-nats v1.7.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M= github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs= github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.0.0-20180311214515-816c9085562c/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20170427095455-13ba4ddd0caa/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5 h1:Etei0Wx6pooT/DeOKcGTr1M/01ggz95Ajq8BBwCOKBU= github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967 h1:x7xEyJDP7Hv3LVgvWhzioQqbC/KtuUhTigKlH/8ehhE= github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod 
h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v0.0.0-20180129181852-768a92a02685/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/annotate 
v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170825220121-81e90905daef/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664 h1:YbZJ76lQ1BqNhVe7dKTSB67wDrc2VPRR75IyGyyPDX8= golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1 h1:VeAkjQVzKLmu+JnFcK96TPbkuaTIqwGGAzQ9hgwPjVg= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564 h1:o6ENHFwwr1TZ9CUPQcfo1HGvLP1OPsPOTB7xCIOPNmU= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 
h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.0.0-20180103175015-389dfa299845/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.0.0-20180308224125-73d903622b73 h1:5Z+PFfTIOXwKmOhQtZ0WBykbpGBBOuvbDx2YNAqIoYc= k8s.io/api v0.0.0-20180308224125-73d903622b73/go.mod 
h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apiextensions-apiserver v0.0.0-20180103181712-d0becfa6529e/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apiextensions-apiserver v0.0.0-20180327033742-750feebe2038 h1:VcfogrrvSU1RneMsMUOMf+1o5fN+SFcSrMw3I/yv3LU= k8s.io/apiextensions-apiserver v0.0.0-20180327033742-750feebe2038/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apimachinery v0.0.0-20180103174757-bc110fd540ab/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20180228050457-302974c03f7e h1:CsgbEA8905OlpVLNKWD4GacPex50kFbqhotVNPew+dU= k8s.io/apimachinery v0.0.0-20180228050457-302974c03f7e/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/client-go v5.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/client-go v7.0.0+incompatible h1:kiH+Y6hn+pc78QS/mtBfMJAMIIaWevHi++JvOGEEQp4= k8s.io/client-go v7.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/kube-openapi v0.0.0-20170830100654-868f2f29720b/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= ================================================ FILE: hack/boilerplate.go.txt ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ ================================================ FILE: hack/update-codegen.sh ================================================ #!/bin/bash # Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} # generate the code with: # --output-base because this script should also be able to run inside the vendor dir of # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir # instead of the $GOPATH directly. For normal projects this can be dropped. 
### Workaround for issue: https://github.com/kubernetes/code-generator/issues/6 mkdir -p ${GOPATH}/src/k8s.io/kubernetes/hack/boilerplate cp ${SCRIPT_ROOT}/hack/boilerplate.go.txt ${GOPATH}/src/k8s.io/kubernetes/hack/boilerplate/ ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ github.com/kubeless/kubeless/pkg/client github.com/kubeless/kubeless/pkg/apis \ kubeless:v1beta1 ================================================ FILE: kubeless-non-rbac.jsonnet ================================================ local k = import "ksonnet.beta.1/k.libsonnet"; local runtimesSrc = import "runtimes.jsonnet"; local objectMeta = k.core.v1.objectMeta; local deployment = k.apps.v1beta1.deployment; local container = k.core.v1.container; local service = k.core.v1.service; local serviceAccount = k.core.v1.serviceAccount; local configMap = k.core.v1.configMap; local namespace = "kubeless"; local controller_account_name = "controller-acct"; local controllerEnv = [ { name: "KUBELESS_INGRESS_ENABLED", valueFrom: {configMapKeyRef: {"name": "kubeless-config", key: "ingress-enabled"}} }, { name: "KUBELESS_SERVICE_TYPE", valueFrom: {configMapKeyRef: {"name": "kubeless-config", key: "service-type"}} }, { name: "KUBELESS_NAMESPACE", valueFrom: {fieldRef: {fieldPath: "metadata.namespace"}} }, { name: "KUBELESS_CONFIG", value: "kubeless-config" }, ]; local functionControllerContainer = container.default("kubeless-function-controller", "kubeless/function-controller:latest") + container.imagePullPolicy("IfNotPresent") + container.env(controllerEnv); local httpTriggerControllerContainer = container.default("http-trigger-controller", "kubeless/http-trigger-controller:v1.0.3") + container.imagePullPolicy("IfNotPresent") + container.env(controllerEnv); local cronjobTriggerContainer = container.default("cronjob-trigger-controller", "kubeless/cronjob-trigger-controller:v1.0.3") + container.imagePullPolicy("IfNotPresent") + container.env(controllerEnv); local kubelessLabel = {kubeless: 
"controller"}; local controllerAccount = serviceAccount.default(controller_account_name, namespace); local controllerDeployment = deployment.default("kubeless-controller-manager", [functionControllerContainer, httpTriggerControllerContainer, cronjobTriggerContainer], namespace) + {apiVersion: "apps/v1"} + {metadata+:{labels: kubelessLabel}} + {spec+: {selector: {matchLabels: kubelessLabel}}} + {spec+: {template+: {spec+: {serviceAccountName: controllerAccount.metadata.name}}}} + {spec+: {template+: {metadata: {labels: kubelessLabel}}}}; local crd = [ { apiVersion: "apiextensions.k8s.io/v1beta1", kind: "CustomResourceDefinition", metadata: objectMeta.name("functions.kubeless.io"), spec: {group: "kubeless.io", version: "v1beta1", scope: "Namespaced", names: {plural: "functions", singular: "function", kind: "Function"}}, }, { apiVersion: "apiextensions.k8s.io/v1beta1", kind: "CustomResourceDefinition", metadata: objectMeta.name("httptriggers.kubeless.io"), spec: {group: "kubeless.io", version: "v1beta1", scope: "Namespaced", names: {plural: "httptriggers", singular: "httptrigger", kind: "HTTPTrigger"}}, }, { apiVersion: "apiextensions.k8s.io/v1beta1", kind: "CustomResourceDefinition", metadata: objectMeta.name("cronjobtriggers.kubeless.io"), spec: {group: "kubeless.io", version: "v1beta1", scope: "Namespaced", names: {plural: "cronjobtriggers", singular: "cronjobtrigger", kind: "CronJobTrigger"}}, } ]; local deploymentConfig = '{}'; local kubelessConfig = configMap.default("kubeless-config", namespace) + configMap.data({"ingress-enabled": "false"}) + configMap.data({"service-type": "ClusterIP"})+ configMap.data({"deployment": std.toString(deploymentConfig)})+ configMap.data({"runtime-images": std.toString(runtimesSrc)})+ configMap.data({"enable-build-step": "false"})+ configMap.data({"function-registry-tls-verify": "true"})+ configMap.data({"provision-image": "kubeless/unzip@sha256:e867f9b366ffb1a25f14baf83438db426ced4f7add56137b7300d32507229b5a"})+ 
configMap.data({"provision-image-secret": ""})+ configMap.data({"builder-image": "kubeless/function-image-builder:latest"})+ configMap.data({"builder-image-secret": ""}); { controllerAccount: k.util.prune(controllerAccount), controller: k.util.prune(controllerDeployment), crd: k.util.prune(crd), cfg: k.util.prune(kubelessConfig), } ================================================ FILE: kubeless-openshift.jsonnet ================================================ # Builds on kubeless.ksonnet to produce a deployable manifest on OpenShift 1.5 # Modifies apiVersion for kubeless-controller Deployment to extensions/v1beta1 # Modifies ClusterRole and ClusterRoleBinding apiVersions to v1 local k = import "ksonnet.beta.1/k.libsonnet"; local kubeless = import "kubeless.jsonnet"; local config = kubeless.cfg + k.core.v1.configMap.data({"deployment":'{"spec":{"template":{"spec":{"securityContext":{}}}}}'}); kubeless + { controller: kubeless.controller + { apiVersion: "extensions/v1beta1" }, controllerClusterRole: kubeless.controllerClusterRole + { apiVersion: "v1" }, controllerClusterRoleBinding: kubeless.controllerClusterRoleBinding + { apiVersion: "v1" }, cfg: config, } ================================================ FILE: kubeless.jsonnet ================================================ # Add RBAC role and binding on top of kubeless.jsonnet, to allow # kubeless controller to deploy/update/etc functions on any namespace local k = import "ksonnet.beta.1/k.libsonnet"; local objectMeta = k.core.v1.objectMeta; local kubeless = import "kubeless-non-rbac.jsonnet"; local controller_account = kubeless.controller_account; local controller_roles = [ { apiGroups: [""], resources: ["services", "configmaps"], verbs: ["create", "get", "delete", "list", "update", "patch"], }, { apiGroups: ["apps", "extensions"], resources: ["deployments"], verbs: ["create", "get", "delete", "list", "update", "patch"], }, { apiGroups: [""], resources: ["pods"], verbs: ["list", "delete"], }, { apiGroups: [""], 
resources: ["secrets"], resourceNames: ["kubeless-registry-credentials"], verbs: ["get"], }, { apiGroups: ["kubeless.io"], resources: ["functions", "httptriggers", "cronjobtriggers"], verbs: ["get", "list", "watch", "update", "delete"], }, { apiGroups: ["batch"], resources: ["cronjobs", "jobs"], verbs: ["create", "get", "delete", "deletecollection", "list", "update", "patch"], }, { apiGroups: ["autoscaling"], resources: ["horizontalpodautoscalers"], verbs: ["create", "get", "delete", "list", "update", "patch"], }, { apiGroups: ["apiextensions.k8s.io"], resources: ["customresourcedefinitions"], verbs: ["get", "list"], }, { apiGroups: ["monitoring.coreos.com"], resources: ["alertmanagers", "prometheuses", "servicemonitors"], verbs: ["*"], }, { apiGroups: ["extensions"], resources: ["ingresses"], verbs: ["create", "get", "list", "update", "delete"], }, ]; local controllerAccount = kubeless.controllerAccount; local clusterRole(name, rules) = { apiVersion: "rbac.authorization.k8s.io/v1beta1", kind: "ClusterRole", metadata: objectMeta.name(name), rules: rules, }; local clusterRoleBinding(name, role, subjects) = { apiVersion: "rbac.authorization.k8s.io/v1beta1", kind: "ClusterRoleBinding", metadata: objectMeta.name(name), subjects: [{kind: s.kind, namespace: s.metadata.namespace, name: s.metadata.name} for s in subjects], roleRef: {kind: role.kind, apiGroup: "rbac.authorization.k8s.io", name: role.metadata.name}, }; local controllerClusterRole = clusterRole( "kubeless-controller-deployer", controller_roles); local controllerClusterRoleBinding = clusterRoleBinding( "kubeless-controller-deployer", controllerClusterRole, [controllerAccount] ); kubeless + { controllerClusterRole: controllerClusterRole, controllerClusterRoleBinding: controllerClusterRoleBinding, } ================================================ FILE: manifests/README.md ================================================ # Collection of manifests for development **NOTE: TO INSTALL KUBELESS USE A RELEASED 
MANIFEST AT https://github.com/kubeless/kubeless/releases** In this folder you can find several manifests that you can deploy to extend the base functionality of Kubeless.
--prometheus-url=http://sample-metrics-prom.default.svc:9090 - --metrics-relist-interval=30s - --rate-interval=60s - --v=10 - --logtostderr=true ports: - containerPort: 443 securityContext: runAsUser: 0 --- apiVersion: v1 kind: Service metadata: name: api namespace: custom-metrics spec: ports: - port: 443 targetPort: 443 selector: app: custom-metrics-apiserver --- apiVersion: apiregistration.k8s.io/v1beta1 kind: APIService metadata: name: v1alpha1.custom-metrics.metrics.k8s.io spec: insecureSkipTLSVerify: true group: custom-metrics.metrics.k8s.io groupPriorityMinimum: 1000 versionPriority: 5 service: name: api namespace: custom-metrics version: v1alpha1 --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: custom-metrics-server-resources rules: - apiGroups: - custom-metrics.metrics.k8s.io resources: ["*"] verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: hpa-controller-custom-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: custom-metrics-server-resources subjects: - kind: ServiceAccount name: horizontal-pod-autoscaler namespace: kube-system ================================================ FILE: manifests/autoscaling/prometheus-operator.yaml ================================================ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: prometheus-operator rules: - apiGroups: - extensions resources: - thirdpartyresources verbs: - create - apiGroups: - monitoring.coreos.com resources: - alertmanagers - prometheuses - servicemonitors verbs: - "*" - apiGroups: - apps resources: - statefulsets verbs: ["*"] - apiGroups: [""] resources: - configmaps - secrets verbs: ["*"] - apiGroups: [""] resources: - pods verbs: ["list", "delete"] - apiGroups: [""] resources: - services - endpoints verbs: ["get", "create", "update"] - apiGroups: [""] resources: - nodes verbs: ["list", "watch"] --- apiVersion: v1 kind: ServiceAccount metadata: 
name: prometheus-operator --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: prometheus-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: prometheus-operator subjects: - kind: ServiceAccount name: prometheus-operator namespace: default --- apiVersion: apps/v1beta1 kind: Deployment metadata: name: prometheus-operator labels: operator: prometheus spec: replicas: 1 template: metadata: labels: operator: prometheus spec: serviceAccountName: prometheus-operator containers: - name: prometheus-operator image: luxas/prometheus-operator:v0.10.1 resources: requests: cpu: 100m memory: 50Mi limits: cpu: 200m memory: 100Mi ================================================ FILE: manifests/autoscaling/sample-metrics-app.yaml ================================================ apiVersion: apps/v1beta1 kind: Deployment metadata: labels: app: sample-metrics-app name: sample-metrics-app spec: replicas: 2 template: metadata: labels: app: sample-metrics-app spec: containers: - image: luxas/autoscale-demo:v0.1.2 name: sample-metrics-app ports: - name: web containerPort: 8080 readinessProbe: httpGet: path: / port: 8080 initialDelaySeconds: 3 periodSeconds: 5 livenessProbe: httpGet: path: / port: 8080 initialDelaySeconds: 3 periodSeconds: 5 --- apiVersion: v1 kind: Service metadata: name: sample-metrics-app labels: app: sample-metrics-app spec: ports: - name: web port: 80 targetPort: 8080 selector: app: sample-metrics-app --- apiVersion: monitoring.coreos.com/v1alpha1 kind: ServiceMonitor metadata: name: sample-metrics-app labels: service-monitor: function spec: selector: matchLabels: app: sample-metrics-app endpoints: - port: web --- kind: HorizontalPodAutoscaler apiVersion: autoscaling/v2alpha1 metadata: name: sample-metrics-app-hpa spec: scaleTargetRef: kind: Deployment name: sample-metrics-app minReplicas: 2 maxReplicas: 10 metrics: - type: Object object: target: kind: Service name: sample-metrics-app metricName: 
http_requests targetValue: 100 ================================================ FILE: manifests/autoscaling/sample-prometheus-instance.yaml ================================================ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: prometheus rules: - apiGroups: - "" resources: - nodes - services - endpoints - pods verbs: - get - list - watch --- apiVersion: v1 kind: ServiceAccount metadata: name: prometheus --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: prometheus roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: prometheus subjects: - kind: ServiceAccount name: prometheus namespace: default --- apiVersion: monitoring.coreos.com/v1alpha1 kind: Prometheus metadata: name: sample-metrics-prom labels: app: sample-metrics-prom prometheus: sample-metrics-prom spec: replicas: 1 baseImage: prom/prometheus version: v1.7.1 serviceAccountName: prometheus serviceMonitorSelector: matchLabels: service-monitor: function resources: requests: memory: 300Mi #storage: # resources: # requests: # storage: 3Gi --- apiVersion: v1 kind: Service metadata: name: sample-metrics-prom labels: app: sample-metrics-prom prometheus: sample-metrics-prom spec: type: NodePort ports: - name: web nodePort: 30999 port: 9090 targetPort: web selector: prometheus: sample-metrics-prom ================================================ FILE: manifests/kinesis/kinesalite.yaml ================================================ --- apiVersion: v1 kind: Service metadata: annotations: name: kinesis labels: app: kinesis spec: type: NodePort ports: - port: 4567 selector: app: kinesis --- apiVersion: extensions/v1beta1 kind: Deployment metadata: name: kinesis spec: replicas: 1 template: metadata: labels: app: kinesis spec: containers: - name: kinesis image: saikocat/kinesalite:1.11.5 ports: - containerPort: 4567 args: - --port=4567 ================================================ FILE: 
manifests/monitoring/grafana-configmap.yaml ================================================ apiVersion: v1 data: grafana-net-2-dashboard.json: | { "__inputs": [{ "name": "DS_PROMETHEUS", "label": "Prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" }], "__requires": [{ "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" }, { "type": "panel", "id": "text", "name": "Text", "version": "" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "3.1.0" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" }], "id": null, "title": "Prometheus Stats", "tags": [], "style": "dark", "timezone": "browser", "editable": true, "hideControls": true, "sharedCrosshair": false, "rows": [{ "collapse": false, "editable": true, "height": 178, "panels": [{ "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": ["rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)"], "datasource": "${DS_PROMETHEUS}", "decimals": 1, "editable": true, "error": false, "format": "s", "id": 5, "interval": null, "links": [], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "span": 3, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "(time() - process_start_time_seconds{job=\"prometheus\"})", "intervalFactor": 2, "refId": "A", "step": 4 }], "thresholds": "", "title": "Uptime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current", "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "rangeMaps": [{ "from": "null", "to": "null", "text": "N/A" }], 
"mappingType": 1, "gauge": { "show": false, "minValue": 0, "maxValue": 100, "thresholdMarkers": true, "thresholdLabels": false } }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": ["rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)"], "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "format": "none", "id": 6, "interval": null, "links": [], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "span": 3, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": true }, "targets": [{ "expr": "prometheus_local_storage_memory_series", "intervalFactor": 2, "refId": "A", "step": 4 }], "thresholds": "1,5", "title": "Local Storage Memory Series", "type": "singlestat", "valueFontSize": "70%", "valueMaps": [], "valueName": "current", "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "rangeMaps": [{ "from": "null", "to": "null", "text": "N/A" }], "mappingType": 1, "gauge": { "show": false, "minValue": 0, "maxValue": 100, "thresholdMarkers": true, "thresholdLabels": false } }, { "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": ["rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)"], "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "format": "none", "id": 7, "interval": null, "links": [], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "span": 3, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": true }, "targets": [{ "expr": "prometheus_local_storage_indexing_queue_length", "intervalFactor": 2, "refId": "A", "step": 4 }], "thresholds": "500,4000", "title": "Interal 
Storage Queue Length", "type": "singlestat", "valueFontSize": "70%", "valueMaps": [{ "op": "=", "text": "Empty", "value": "0" }], "valueName": "current", "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "rangeMaps": [{ "from": "null", "to": "null", "text": "N/A" }], "mappingType": 1, "gauge": { "show": false, "minValue": 0, "maxValue": 100, "thresholdMarkers": true, "thresholdLabels": false } }, { "content": "\"Prometheus\nPrometheus\n\n

You're using Prometheus, an open-source systems monitoring and alerting toolkit originally built at SoundCloud. For more information, check out the Grafana and Prometheus projects.

", "editable": true, "error": false, "id": 9, "links": [], "mode": "html", "span": 3, "style": {}, "title": "", "transparent": true, "type": "text" }], "title": "New row" }, { "collapse": false, "editable": true, "height": 227, "panels": [{ "aliasColors": { "prometheus": "#C15C17", "{instance=\"localhost:9090\",job=\"prometheus\"}": "#C15C17" }, "bars": false, "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 3, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 9, "stack": false, "steppedLine": false, "targets": [{ "expr": "rate(prometheus_local_storage_ingested_samples_total[5m])", "interval": "", "intervalFactor": 2, "legendFormat": "{{job}}", "metric": "", "refId": "A", "step": 2 }], "timeFrom": null, "timeShift": null, "title": "Samples ingested (rate-5m)", "tooltip": { "shared": true, "value_type": "cumulative", "ordering": "alphabetical", "msResolution": false }, "type": "graph", "yaxes": [{ "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }, { "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }], "xaxis": { "show": true } }, { "content": "#### Samples Ingested\nThis graph displays the count of samples ingested by the Prometheus server, as measured over the last 5 minutes, per time series in the range vector. When troubleshooting an issue on IRC or Github, this is often the first stat requested by the Prometheus team. 
", "editable": true, "error": false, "id": 8, "links": [], "mode": "markdown", "span": 2.995914043583536, "style": {}, "title": "", "transparent": true, "type": "text" }], "title": "New row" }, { "collapse": false, "editable": true, "height": "250px", "panels": [{ "aliasColors": { "prometheus": "#F9BA8F", "{instance=\"localhost:9090\",interval=\"5s\",job=\"prometheus\"}": "#F9BA8F" }, "bars": false, "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 5, "stack": false, "steppedLine": false, "targets": [{ "expr": "rate(prometheus_target_interval_length_seconds_count[5m])", "intervalFactor": 2, "legendFormat": "{{job}}", "refId": "A", "step": 2 }], "timeFrom": null, "timeShift": null, "title": "Target Scrapes (last 5m)", "tooltip": { "shared": true, "value_type": "cumulative", "ordering": "alphabetical", "msResolution": false }, "type": "graph", "yaxes": [{ "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }, { "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }], "xaxis": { "show": true } }, { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 14, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": 
"connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 4, "stack": false, "steppedLine": false, "targets": [{ "expr": "prometheus_target_interval_length_seconds{quantile!=\"0.01\", quantile!=\"0.05\"}", "interval": "", "intervalFactor": 2, "legendFormat": "{{quantile}} ({{interval}})", "metric": "", "refId": "A", "step": 2 }], "timeFrom": null, "timeShift": null, "title": "Scrape Duration", "tooltip": { "shared": true, "value_type": "cumulative", "ordering": "alphabetical", "msResolution": false }, "type": "graph", "yaxes": [{ "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }, { "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }], "xaxis": { "show": true } }, { "content": "#### Scrapes\nPrometheus scrapes metrics from instrumented jobs, either directly or via an intermediary push gateway for short-lived jobs. Target scrapes will show how frequently targets are scraped, as measured over the last 5 minutes, per time series in the range vector. Scrape Duration will show how long the scrapes are taking, with percentiles available as series. 
", "editable": true, "error": false, "id": 11, "links": [], "mode": "markdown", "span": 3, "style": {}, "title": "", "transparent": true, "type": "text" }], "title": "New row" }, { "collapse": false, "editable": true, "height": "250px", "panels": [{ "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": null, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 12, "legend": { "alignAsTable": false, "avg": false, "current": false, "hideEmpty": true, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 9, "stack": false, "steppedLine": false, "targets": [{ "expr": "prometheus_evaluator_duration_milliseconds{quantile!=\"0.01\", quantile!=\"0.05\"}", "interval": "", "intervalFactor": 2, "legendFormat": "{{quantile}}", "refId": "A", "step": 2 }], "timeFrom": null, "timeShift": null, "title": "Rule Eval Duration", "tooltip": { "shared": true, "value_type": "cumulative", "ordering": "alphabetical", "msResolution": false }, "type": "graph", "yaxes": [{ "show": true, "min": null, "max": null, "logBase": 1, "format": "percentunit", "label": "" }, { "show": true, "min": null, "max": null, "logBase": 1, "format": "short" }], "xaxis": { "show": true } }, { "content": "#### Rule Evaluation Duration\nThis graph panel plots the duration for all evaluations to execute. 
The 50th percentile, 90th percentile and 99th percentile are shown as three separate series to help identify outliers that may be skewing the data.", "editable": true, "error": false, "id": 15, "links": [], "mode": "markdown", "span": 3, "style": {}, "title": "", "transparent": true, "type": "text" }], "title": "New row" }], "time": { "from": "now-5m", "to": "now" }, "timepicker": { "now": true, "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"], "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] }, "templating": { "list": [] }, "annotations": { "list": [] }, "refresh": false, "schemaVersion": 12, "version": 0, "links": [{ "icon": "info", "tags": [], "targetBlank": true, "title": "Grafana Docs", "tooltip": "", "type": "link", "url": "http://www.grafana.org/docs" }, { "icon": "info", "tags": [], "targetBlank": true, "title": "Prometheus Docs", "type": "link", "url": "http://prometheus.io/docs/introduction/overview/" }], "gnetId": 2, "description": "The official, pre-built Prometheus Stats Dashboard." 
} grafana-net-737-dashboard.json: | { "__inputs": [{ "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" }], "__requires": [{ "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "3.1.0" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" }], "id": null, "title": "Kubernetes Pod Resources", "description": "Shows resource usage of Kubernetes pods.", "tags": [ "kubernetes" ], "style": "dark", "timezone": "browser", "editable": true, "hideControls": false, "sharedCrosshair": false, "rows": [{ "collapse": false, "editable": true, "height": "250px", "panels": [{ "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "format": "percent", "gauge": { "maxValue": 100, "minValue": 0, "show": true, "thresholdLabels": false, "thresholdMarkers": true }, "height": "180px", "id": 4, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 4, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum (container_memory_working_set_bytes{id=\"/\",instance=~\"^$instance$\"}) / sum (machine_memory_bytes{instance=~\"^$instance$\"}) * 100", "interval": "", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 2 }], 
"thresholds": "65, 90", "timeFrom": "1m", "timeShift": null, "title": "Memory Working Set", "transparent": false, "type": "singlestat", "valueFontSize": "80%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "percent", "gauge": { "maxValue": 100, "minValue": 0, "show": true, "thresholdLabels": false, "thresholdMarkers": true }, "height": "180px", "id": 6, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 4, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum(rate(container_cpu_usage_seconds_total{id=\"/\",instance=~\"^$instance$\"}[1m])) / sum (machine_cpu_cores{instance=~\"^$instance$\"}) * 100", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "65, 90", "timeFrom": "1m", "timeShift": null, "title": "Cpu Usage", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": true, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "percent", "gauge": { "maxValue": 100, "minValue": 0, "show": true, "thresholdLabels": false, "thresholdMarkers": true 
}, "height": "180px", "id": 7, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 4, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum(container_fs_usage_bytes{id=\"/\",instance=~\"^$instance$\"}) / sum(container_fs_limit_bytes{id=\"/\",instance=~\"^$instance$\"}) * 100", "interval": "10s", "intervalFactor": 1, "legendFormat": "", "metric": "", "refId": "A", "step": 10 }], "thresholds": "65, 90", "timeFrom": "1m", "timeShift": null, "title": "Filesystem Usage", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "hideTimeOverride": true, "id": 9, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "20%", "prefix": "", "prefixFontSize": "20%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": 
"sum(container_memory_working_set_bytes{id=\"/\",instance=~\"^$instance$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "", "timeFrom": "1m", "title": "Used", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "hideTimeOverride": true, "id": 10, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum (machine_memory_bytes{instance=~\"^$instance$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "", "timeFrom": "1m", "title": "Total", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "none", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, 
"height": "1px", "hideTimeOverride": true, "id": 11, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": " cores", "postfixFontSize": "30%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",instance=~\"^$instance$\"}[1m]))", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "", "timeFrom": "1m", "timeShift": null, "title": "Used", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "none", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "hideTimeOverride": true, "id": 12, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": " cores", "postfixFontSize": "30%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum (machine_cpu_cores{instance=~\"^$instance$\"})", "interval": 
"10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "", "timeFrom": "1m", "title": "Total", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "hideTimeOverride": true, "id": 13, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum(container_fs_usage_bytes{id=\"/\",instance=~\"^$instance$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "", "timeFrom": "1m", "title": "Used", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "height": "1px", "hideTimeOverride": true, "id": 14, "interval": null, "isNew": 
true, "links": [], "mappingType": 1, "mappingTypes": [{ "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 }], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [{ "from": "null", "text": "N/A", "to": "null" }], "span": 2, "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "targets": [{ "expr": "sum (container_fs_limit_bytes{id=\"/\",instance=~\"^$instance$\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", "step": 10 }], "thresholds": "", "timeFrom": "1m", "title": "Total", "type": "singlestat", "valueFontSize": "50%", "valueMaps": [{ "op": "=", "text": "N/A", "value": "null" }], "valueName": "current" }, { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)", "thresholdLine": false }, "height": "200px", "id": 32, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [{ "expr": "sum(rate(container_network_receive_bytes_total{instance=~\"^$instance$\",namespace=~\"^$namespace$\"}[1m]))", "interval": "", "intervalFactor": 2, "legendFormat": "receive", "metric": "network", "refId": "A", "step": 240 }, { "expr": "- sum(rate(container_network_transmit_bytes_total{instance=~\"^$instance$\",namespace=~\"^$namespace$\"}[1m]))", "interval": 
"", "intervalFactor": 2, "legendFormat": "transmit", "metric": "network", "refId": "B", "step": 240 }], "timeFrom": null, "timeShift": null, "title": "Network", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "transparent": false, "type": "graph", "xaxis": { "show": true }, "yaxes": [{ "format": "Bps", "label": "transmit / receive", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "Bps", "label": null, "logBase": 1, "max": null, "min": null, "show": false }] }], "showTitle": true, "title": "all pods" }, { "collapse": false, "editable": true, "height": "250px", "panels": [{ "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 3, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "height": "", "id": 17, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": true, "hideZero": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": null, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [{ "expr": "sum(rate(container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",instance=~\"^$instance$\",namespace=~\"^$namespace$\"}[1m])) by (pod_name)", "interval": "", "intervalFactor": 2, "legendFormat": "{{ pod_name }}", "metric": "container_cpu", "refId": "A", "step": 240 }], "timeFrom": null, "timeShift": null, "title": "Cpu Usage", "tooltip": { "msResolution": true, "shared": false, "sort": 2, "value_type": "cumulative" }, "transparent": false, "type": "graph", "xaxis": { "show": true }, "yaxes": [{ "format": "none", "label": "cores", 
"logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false }] }, { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 0, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 33, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": true, "hideZero": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": null, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [{ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",instance=~\"^$instance$\",namespace=~\"^$namespace$\"}) by (pod_name)", "interval": "", "intervalFactor": 2, "legendFormat": "{{ pod_name }}", "metric": "", "refId": "A", "step": 240 }], "timeFrom": null, "timeShift": null, "title": "Memory Working Set", "tooltip": { "msResolution": false, "shared": false, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [{ "format": "bytes", "label": "used", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false }] }, { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 16, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": true, 
"hideZero": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [{ "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",instance=~\"^$instance$\",namespace=~\"^$namespace$\"}[1m])) by (pod_name)", "interval": "", "intervalFactor": 2, "legendFormat": "{{ pod_name }} < in", "metric": "network", "refId": "A", "step": 240 }, { "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",instance=~\"^$instance$\",namespace=~\"^$namespace$\"}[1m])) by (pod_name)", "interval": "", "intervalFactor": 2, "legendFormat": "{{ pod_name }} > out", "metric": "network", "refId": "B", "step": 240 }], "timeFrom": null, "timeShift": null, "title": "Network", "tooltip": { "msResolution": false, "shared": false, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [{ "format": "Bps", "label": "transmit / receive", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false }] }, { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "decimals": 2, "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 34, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": true, "hideZero": true, "max": false, "min": false, "rightSide": true, "show": true, "sideWidth": 200, "sort": "current", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], 
"nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [{ "expr": "sum(container_fs_usage_bytes{image!=\"\",name=~\"^k8s_.*\",instance=~\"^$instance$\",namespace=~\"^$namespace$\"}) by (pod_name)", "interval": "", "intervalFactor": 2, "legendFormat": "{{ pod_name }}", "metric": "network", "refId": "A", "step": 240 }], "timeFrom": null, "timeShift": null, "title": "Filesystem", "tooltip": { "msResolution": false, "shared": false, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [{ "format": "bytes", "label": "used", "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false }] }], "showTitle": true, "title": "each pod" }], "time": { "from": "now-3d", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "templating": { "list": [{ "allValue": ".*", "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": true, "label": "Instance", "multi": false, "name": "instance", "options": [], "query": "label_values(instance)", "refresh": 1, "regex": "", "type": "query" }, { "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": true, "label": "Namespace", "multi": true, "name": "namespace", "options": [], "query": "label_values(namespace)", "refresh": 1, "regex": "", "type": "query" }] }, "annotations": { "list": [] }, "refresh": false, "schemaVersion": 12, "version": 8, "links": [], "gnetId": 737 } prometheus-datasource.json: | { "name": "prometheus", "type": "prometheus", "url": "http://prometheus:9090", "access": "proxy", "basicAuth": false } kind: ConfigMap metadata: creationTimestamp: null name: grafana-import-dashboards namespace: 
monitoring ================================================ FILE: manifests/monitoring/grafana-deployment.yaml ================================================ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: grafana-core namespace: monitoring labels: app: grafana component: core spec: replicas: 1 template: metadata: labels: app: grafana component: core spec: containers: - image: grafana/grafana:3.1.1 name: grafana-core # env: resources: # keep request = limit to keep this container in guaranteed class limits: cpu: 100m memory: 100Mi requests: cpu: 100m memory: 100Mi env: # The following env variables set up basic auth with the default admin user and admin password. - name: GF_AUTH_BASIC_ENABLED value: "true" - name: GF_AUTH_ANONYMOUS_ENABLED value: "false" # - name: GF_AUTH_ANONYMOUS_ORG_ROLE # value: Admin # does not really work, because of template variables in exported dashboards: # - name: GF_DASHBOARDS_JSON_ENABLED # value: "true" volumeMounts: - name: grafana-persistent-storage mountPath: /var volumes: - name: grafana-persistent-storage emptyDir: {} ================================================ FILE: manifests/monitoring/grafana-job.yaml ================================================ apiVersion: batch/v1 kind: Job metadata: name: grafana-import-dashboards namespace: monitoring labels: app: grafana component: import-dashboards spec: template: metadata: name: grafana-import-dashboards labels: app: grafana component: import-dashboards spec: containers: - name: grafana-import-dashboards image: giantswarm/tiny-tools command: ["/bin/sh", "-c"] workingDir: /opt/grafana-import-dashboards args: # FIXME use kubernetes probe instead of "until curl" - > until $(curl --silent --fail --show-error --output /dev/null http://admin:admin@grafana:3000/api/datasources); do printf '.' 
; sleep 1 ; done ; for file in *-datasource.json ; do if [ -e "$file" ] ; then echo "importing $file" && curl --silent --fail --show-error \ --request POST http://admin:admin@grafana:3000/api/datasources \ --header "Content-Type: application/json" \ --data-binary "@$file" ; echo "" ; fi done ; for file in *-dashboard.json ; do if [ -e "$file" ] ; then echo "importing $file" && cat "$file" \ | xargs -0 printf '{"dashboard":%s,"overwrite":true,"inputs":[{"name":"DS_PROMETHEUS","type":"datasource","pluginId":"prometheus","value":"prometheus"}]}' \ | jq -c '.' \ | curl --silent --fail --show-error \ --request POST http://admin:admin@grafana:3000/api/dashboards/import \ --header "Content-Type: application/json" \ --data-binary "@-" ; echo "" ; fi done volumeMounts: - name: config-volume mountPath: /opt/grafana-import-dashboards restartPolicy: Never volumes: - name: config-volume configMap: name: grafana-import-dashboards ================================================ FILE: manifests/monitoring/grafana-service.yaml ================================================ apiVersion: v1 kind: Service metadata: name: grafana namespace: monitoring labels: app: grafana component: core spec: type: NodePort ports: - port: 3000 selector: app: grafana component: core ================================================ FILE: manifests/monitoring/prometheus.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: monitoring --- kind: ConfigMap metadata: name: prometheus-config namespace: monitoring apiVersion: v1 data: prometheus.yml: |- global: scrape_interval: 30s scrape_timeout: 30s scrape_configs: - job_name: 'kubernetes-cluster' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: apiserver - job_name: 'kubernetes-nodes' scheme: https 
tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt insecure_skip_verify: true kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - job_name: 'kubernetes-service-endpoints' scheme: http kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: endpoint relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_service_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] action: replace target_label: kubernetes_name - job_name: 'kubernetes-services' scheme: http kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: service relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_service_namespace] target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] target_label: kubernetes_name - job_name: 'kubernetes-pods' scheme: http kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: pod relabel_configs: - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] action: replace regex: (.+):(?:\\d+);(\\d+) replacement: ${1}:${2} target_label: __address__ - action: labelmap regex: __meta_kubernetes_pod_label_(.+) - source_labels: [__meta_kubernetes_pod_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_pod_name] action: replace 
target_label: kubernetes_pod_name - source_labels: [__meta_kubernetes_pod_node_name] action: replace target_label: kubernetes_pod_node_name --- apiVersion: v1 kind: Service metadata: name: prometheus namespace: monitoring spec: ports: - port: 9090 protocol: TCP targetPort: 9090 selector: name: prometheus type: NodePort --- apiVersion: extensions/v1beta1 kind: Deployment metadata: labels: name: prometheus name: prometheus namespace: monitoring spec: replicas: 1 selector: matchLabels: name: prometheus strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 1 type: RollingUpdate template: metadata: creationTimestamp: null labels: name: prometheus annotations: prometheus.io/scrape: "true" prometheus.io/port: "9090" spec: containers: - args: - -config.file=/etc/prometheus/prometheus.yml - -storage.local.path=/prometheus - -storage.local.retention=24h command: - /bin/prometheus image: quay.io/prometheus/prometheus:v1.1.3 imagePullPolicy: IfNotPresent name: prometheus ports: - containerPort: 9090 protocol: TCP resources: limits: cpu: 500m memory: 2500Mi requests: cpu: 100m memory: 100Mi volumeMounts: - mountPath: /prometheus name: data - mountPath: /etc/prometheus name: config-volume restartPolicy: Always securityContext: {} terminationGracePeriodSeconds: 30 volumes: - emptyDir: {} name: data - configMap: name: prometheus-config name: config-volume ================================================ FILE: manifests/nats/nats-cluster.yaml ================================================ apiVersion: "nats.io/v1alpha2" kind: "NatsCluster" metadata: name: "nats" spec: size: 2 version: "1.1.0" ================================================ FILE: manifests/ui/README.md ================================================ # Kubeless UI You can find the latest manifest for deploying the UI in the releases page of the kubeless-ui repository: https://github.com/kubeless/kubeless-ui/releases ================================================ FILE: pkg/apis/kubeless/register.go 
================================================ package kubeless const ( // GroupName is ApiGroup for the Kubeless API GroupName = "kubeless.io" ) ================================================ FILE: pkg/apis/kubeless/v1beta1/doc.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:deepcopy-gen=package // Package v1beta1 is the v1beta1 version of the Kubeless API // +groupName=kubeless.io package v1beta1 ================================================ FILE: pkg/apis/kubeless/v1beta1/function.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package v1beta1 import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Function object type Function struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` Spec FunctionSpec `json:"spec"` } // FunctionSpec contains func specification type FunctionSpec struct { Handler string `json:"handler"` // Function handler: "file.function" Function string `json:"function"` // Function file content or URL of the function FunctionContentType string `json:"function-content-type"` // Function file content type (plain text, url, base64, zip or compressedtar) Checksum string `json:"checksum"` // Checksum of the file Runtime string `json:"runtime"` // Function runtime to use Timeout string `json:"timeout"` // Maximum timeout for the function to complete its execution Deps string `json:"deps"` // Function dependencies Deployment appsv1.Deployment `json:"deployment" protobuf:"bytes,3,opt,name=template"` ServiceSpec v1.ServiceSpec `json:"service"` HorizontalPodAutoscaler v2beta1.HorizontalPodAutoscaler `json:"horizontalPodAutoscaler" protobuf:"bytes,3,opt,name=horizontalPodAutoscaler"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // FunctionList contains map of functions type FunctionList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` // Items is a list of third party objects Items []*Function `json:"items"` } ================================================ FILE: pkg/apis/kubeless/v1beta1/register.go ================================================ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" kubeless "github.com/kubeless/kubeless/pkg/apis/kubeless" ) // SchemeGroupVersion is group version used to register these objects var 
SchemeGroupVersion = schema.GroupVersion{Group: kubeless.GroupName, Version: "v1beta1"} // Kind takes an unqualified kind and returns back a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } var ( // SchemeBuilder collects the scheme builder functions for the Kubeless API SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme applies the SchemeBuilder functions to a specified scheme AddToScheme = SchemeBuilder.AddToScheme ) // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Function{}, &FunctionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } ================================================ FILE: pkg/apis/kubeless/v1beta1/zz_generated.deepcopy.go ================================================ // +build !ignore_autogenerated /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Function) DeepCopyInto(out *Function) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function.
func (in *Function) DeepCopy() *Function {
	if in == nil {
		return nil
	}
	out := new(Function)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Function) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FunctionList) DeepCopyInto(out *FunctionList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		// Shadow in/out with pointers to the slices so the loop below reads
		// naturally; each non-nil *Function element is deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]*Function, len(*in))
		for i := range *in {
			if (*in)[i] == nil {
				(*out)[i] = nil
			} else {
				(*out)[i] = new(Function)
				(*in)[i].DeepCopyInto((*out)[i])
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList.
func (in *FunctionList) DeepCopy() *FunctionList {
	if in == nil {
		return nil
	}
	out := new(FunctionList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FunctionList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { *out = *in in.Deployment.DeepCopyInto(&out.Deployment) in.ServiceSpec.DeepCopyInto(&out.ServiceSpec) in.HorizontalPodAutoscaler.DeepCopyInto(&out.HorizontalPodAutoscaler) return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. func (in *FunctionSpec) DeepCopy() *FunctionSpec { if in == nil { return nil } out := new(FunctionSpec) in.DeepCopyInto(out) return out } ================================================ FILE: pkg/client/clientset/versioned/clientset.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package versioned import ( glog "github.com/golang/glog" kubelessv1beta1 "github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" ) type Interface interface { Discovery() discovery.DiscoveryInterface KubelessV1beta1() kubelessv1beta1.KubelessV1beta1Interface // Deprecated: please explicitly pick a version if possible. Kubeless() kubelessv1beta1.KubelessV1beta1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. 
type Clientset struct {
	*discovery.DiscoveryClient
	kubelessV1beta1 *kubelessv1beta1.KubelessV1beta1Client
}

// KubelessV1beta1 retrieves the KubelessV1beta1Client
func (c *Clientset) KubelessV1beta1() kubelessv1beta1.KubelessV1beta1Interface {
	return c.kubelessV1beta1
}

// Deprecated: Kubeless retrieves the default version of KubelessClient.
// Please explicitly pick a version.
func (c *Clientset) Kubeless() kubelessv1beta1.KubelessV1beta1Interface {
	return c.kubelessV1beta1
}

// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	// Guard against a nil receiver so callers can safely chain off a nil *Clientset.
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}

// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
	// Work on a shallow copy so the caller's config is never mutated.
	configShallowCopy := *c
	// If the caller set QPS but supplied no rate limiter, install a token-bucket
	// limiter so every typed client built from this config throttles consistently.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.kubelessV1beta1, err = kubelessv1beta1.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &cs, nil
}

// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.kubelessV1beta1 = kubelessv1beta1.NewForConfigOrDie(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
	return &cs
}

// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset { var cs Clientset cs.kubelessV1beta1 = kubelessv1beta1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs } ================================================ FILE: pkg/client/clientset/versioned/doc.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This package has the automatically generated clientset. package versioned ================================================ FILE: pkg/client/clientset/versioned/fake/clientset_generated.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

package fake

import (
	clientset "github.com/kubeless/kubeless/pkg/client/clientset/versioned"
	kubelessv1beta1 "github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1"
	fakekubelessv1beta1 "github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/discovery"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/testing"
)

// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
	// Seed the object tracker with the caller-provided fixture objects.
	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	// Route every verb on every resource through the tracker, and serve
	// watches from a fresh fake watcher.
	fakePtr := testing.Fake{}
	fakePtr.AddReactor("*", "*", testing.ObjectReaction(o))

	fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil))

	return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}}
}

// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } var _ clientset.Interface = &Clientset{} // KubelessV1beta1 retrieves the KubelessV1beta1Client func (c *Clientset) KubelessV1beta1() kubelessv1beta1.KubelessV1beta1Interface { return &fakekubelessv1beta1.FakeKubelessV1beta1{Fake: &c.Fake} } // Kubeless retrieves the KubelessV1beta1Client func (c *Clientset) Kubeless() kubelessv1beta1.KubelessV1beta1Interface { return &fakekubelessv1beta1.FakeKubelessV1beta1{Fake: &c.Fake} } ================================================ FILE: pkg/client/clientset/versioned/fake/doc.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This package has the automatically generated fake clientset. package fake ================================================ FILE: pkg/client/clientset/versioned/fake/register.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package fake import ( kubelessv1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" ) var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = runtime.NewParameterCodec(scheme) func init() { v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) AddToScheme(scheme) } // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // // import ( // "k8s.io/client-go/kubernetes" // clientsetscheme "k8s.io/client-go/kuberentes/scheme" // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" // ) // // kclientset, _ := kubernetes.NewForConfig(c) // aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. func AddToScheme(scheme *runtime.Scheme) { kubelessv1beta1.AddToScheme(scheme) } ================================================ FILE: pkg/client/clientset/versioned/scheme/doc.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ // This package contains the scheme of the automatically generated clientset. package scheme ================================================ FILE: pkg/client/clientset/versioned/scheme/register.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package scheme import ( kubelessv1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" ) var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) func init() { v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) AddToScheme(Scheme) } // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // // import ( // "k8s.io/client-go/kubernetes" // clientsetscheme "k8s.io/client-go/kuberentes/scheme" // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" // ) // // kclientset, _ := kubernetes.NewForConfig(c) // aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
func AddToScheme(scheme *runtime.Scheme) { kubelessv1beta1.AddToScheme(scheme) } ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/doc.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This package has the automatically generated typed clients. package v1beta1 ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake/doc.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package fake has the automatically generated clients. 
package fake ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake/fake_function.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fake import ( v1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" ) // FakeFunctions implements FunctionInterface type FakeFunctions struct { Fake *FakeKubelessV1beta1 ns string } var functionsResource = schema.GroupVersionResource{Group: "kubeless.io", Version: "v1beta1", Resource: "functions"} var functionsKind = schema.GroupVersionKind{Group: "kubeless.io", Version: "v1beta1", Kind: "Function"} // Get takes name of the function, and returns the corresponding function object, and an error if there is any. func (c *FakeFunctions) Get(name string, options v1.GetOptions) (result *v1beta1.Function, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(functionsResource, c.ns, name), &v1beta1.Function{}) if obj == nil { return nil, err } return obj.(*v1beta1.Function), err } // List takes label and field selectors, and returns the list of Functions that match those selectors. 
func (c *FakeFunctions) List(opts v1.ListOptions) (result *v1beta1.FunctionList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(functionsResource, functionsKind, c.ns, opts), &v1beta1.FunctionList{})

	if obj == nil {
		return nil, err
	}

	// Apply the label selector client-side, mirroring the filtering a real
	// apiserver would perform; an absent selector matches everything.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1beta1.FunctionList{}
	for _, item := range obj.(*v1beta1.FunctionList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested functions.
func (c *FakeFunctions) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(functionsResource, c.ns, opts))

}

// Create takes the representation of a function and creates it. Returns the server's representation of the function, and an error, if there is any.
func (c *FakeFunctions) Create(function *v1beta1.Function) (result *v1beta1.Function, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(functionsResource, c.ns, function), &v1beta1.Function{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1beta1.Function), err
}

// Update takes the representation of a function and updates it. Returns the server's representation of the function, and an error, if there is any.
func (c *FakeFunctions) Update(function *v1beta1.Function) (result *v1beta1.Function, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(functionsResource, c.ns, function), &v1beta1.Function{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1beta1.Function), err
}

// Delete takes name of the function and deletes it. Returns an error if one occurs.
func (c *FakeFunctions) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(functionsResource, c.ns, name), &v1beta1.Function{})

	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakeFunctions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(functionsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.FunctionList{}) return err } // Patch applies the patch and returns the patched function. func (c *FakeFunctions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Function, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(functionsResource, c.ns, name, data, subresources...), &v1beta1.Function{}) if obj == nil { return nil, err } return obj.(*v1beta1.Function), err } ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake/fake_kubeless_client.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fake import ( v1beta1 "github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) type FakeKubelessV1beta1 struct { *testing.Fake } func (c *FakeKubelessV1beta1) Functions(namespace string) v1beta1.FunctionInterface { return &FakeFunctions{c, namespace} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
func (c *FakeKubelessV1beta1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/function.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( v1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" scheme "github.com/kubeless/kubeless/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" ) // FunctionsGetter has a method to return a FunctionInterface. // A group's client should implement this interface. type FunctionsGetter interface { Functions(namespace string) FunctionInterface } // FunctionInterface has methods to work with Function resources. 
type FunctionInterface interface { Create(*v1beta1.Function) (*v1beta1.Function, error) Update(*v1beta1.Function) (*v1beta1.Function, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error Get(name string, options v1.GetOptions) (*v1beta1.Function, error) List(opts v1.ListOptions) (*v1beta1.FunctionList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Function, err error) FunctionExpansion } // functions implements FunctionInterface type functions struct { client rest.Interface ns string } // newFunctions returns a Functions func newFunctions(c *KubelessV1beta1Client, namespace string) *functions { return &functions{ client: c.RESTClient(), ns: namespace, } } // Get takes name of the function, and returns the corresponding function object, and an error if there is any. func (c *functions) Get(name string, options v1.GetOptions) (result *v1beta1.Function, err error) { result = &v1beta1.Function{} err = c.client.Get(). Namespace(c.ns). Resource("functions"). Name(name). VersionedParams(&options, scheme.ParameterCodec). Do(). Into(result) return } // List takes label and field selectors, and returns the list of Functions that match those selectors. func (c *functions) List(opts v1.ListOptions) (result *v1beta1.FunctionList, err error) { result = &v1beta1.FunctionList{} err = c.client.Get(). Namespace(c.ns). Resource("functions"). VersionedParams(&opts, scheme.ParameterCodec). Do(). Into(result) return } // Watch returns a watch.Interface that watches the requested functions. func (c *functions) Watch(opts v1.ListOptions) (watch.Interface, error) { opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("functions"). VersionedParams(&opts, scheme.ParameterCodec). Watch() } // Create takes the representation of a function and creates it. 
Returns the server's representation of the function, and an error, if there is any. func (c *functions) Create(function *v1beta1.Function) (result *v1beta1.Function, err error) { result = &v1beta1.Function{} err = c.client.Post(). Namespace(c.ns). Resource("functions"). Body(function). Do(). Into(result) return } // Update takes the representation of a function and updates it. Returns the server's representation of the function, and an error, if there is any. func (c *functions) Update(function *v1beta1.Function) (result *v1beta1.Function, err error) { result = &v1beta1.Function{} err = c.client.Put(). Namespace(c.ns). Resource("functions"). Name(function.Name). Body(function). Do(). Into(result) return } // Delete takes name of the function and deletes it. Returns an error if one occurs. func (c *functions) Delete(name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("functions"). Name(name). Body(options). Do(). Error() } // DeleteCollection deletes a collection of objects. func (c *functions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("functions"). VersionedParams(&listOptions, scheme.ParameterCodec). Body(options). Do(). Error() } // Patch applies the patch and returns the patched function. func (c *functions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Function, err error) { result = &v1beta1.Function{} err = c.client.Patch(pt). Namespace(c.ns). Resource("functions"). SubResource(subresources...). Name(name). Body(data). Do(). 
Into(result) return } ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/generated_expansion.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 type FunctionExpansion interface{} ================================================ FILE: pkg/client/clientset/versioned/typed/kubeless/v1beta1/kubeless_client.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 import ( v1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/client/clientset/versioned/scheme" serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) type KubelessV1beta1Interface interface { RESTClient() rest.Interface FunctionsGetter } // KubelessV1beta1Client is used to interact with features provided by the kubeless.io group. 
type KubelessV1beta1Client struct {
	restClient rest.Interface
}

// Functions returns a FunctionInterface scoped to the given namespace.
func (c *KubelessV1beta1Client) Functions(namespace string) FunctionInterface {
	return newFunctions(c, namespace)
}

// NewForConfig creates a new KubelessV1beta1Client for the given config.
// The input config is copied, so the caller's config is never mutated.
func NewForConfig(c *rest.Config) (*KubelessV1beta1Client, error) {
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &KubelessV1beta1Client{client}, nil
}

// NewForConfigOrDie creates a new KubelessV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *KubelessV1beta1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new KubelessV1beta1Client for the given RESTClient.
func New(c rest.Interface) *KubelessV1beta1Client {
	return &KubelessV1beta1Client{c}
}

// setConfigDefaults fills in the group/version, API path, serializer and
// user agent required to talk to the kubeless.io/v1beta1 API.
func setConfigDefaults(config *rest.Config) error {
	gv := v1beta1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}

	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *KubelessV1beta1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}

================================================
FILE: pkg/client/informers/externalversions/factory.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was automatically generated by informer-gen

package externalversions

import (
	reflect "reflect"
	sync "sync"
	time "time"

	versioned "github.com/kubeless/kubeless/pkg/client/clientset/versioned"
	internalinterfaces "github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces"
	kubeless "github.com/kubeless/kubeless/pkg/client/informers/externalversions/kubeless"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	cache "k8s.io/client-go/tools/cache"
)

// sharedInformerFactory caches one SharedIndexInformer per object type so
// multiple consumers share a single watch connection per resource.
type sharedInformerFactory struct {
	client           versioned.Interface
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	// lock guards informers and startedInformers.
	lock             sync.Mutex
	defaultResync    time.Duration

	informers map[reflect.Type]cache.SharedIndexInformer
	// startedInformers is used for tracking which informers have been started.
	// This allows Start() to be called multiple times safely.
	startedInformers map[reflect.Type]bool
}

// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
	return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil)
}

// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return &sharedInformerFactory{ client: client, namespace: namespace, tweakListOptions: tweakListOptions, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), } } // Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() for informerType, informer := range f.informers { if !f.startedInformers[informerType] { go informer.Run(stopCh) f.startedInformers[informerType] = true } } } // WaitForCacheSync waits for all started informers' cache were synced. func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() defer f.lock.Unlock() informers := map[reflect.Type]cache.SharedIndexInformer{} for informerType, informer := range f.informers { if f.startedInformers[informerType] { informers[informerType] = informer } } return informers }() res := map[reflect.Type]bool{} for informType, informer := range informers { res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) } return res } // InternalInformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() defer f.lock.Unlock() informerType := reflect.TypeOf(obj) informer, exists := f.informers[informerType] if exists { return informer } informer = newFunc(f.client, f.defaultResync) f.informers[informerType] = informer return informer } // SharedInformerFactory provides shared informers for resources in all known // API group versions. 
type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory ForResource(resource schema.GroupVersionResource) (GenericInformer, error) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool Kubeless() kubeless.Interface } func (f *sharedInformerFactory) Kubeless() kubeless.Interface { return kubeless.New(f, f.namespace, f.tweakListOptions) } ================================================ FILE: pkg/client/informers/externalversions/generic.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was automatically generated by informer-gen package externalversions import ( "fmt" v1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other // sharedInformers based on type type GenericInformer interface { Informer() cache.SharedIndexInformer Lister() cache.GenericLister } type genericInformer struct { informer cache.SharedIndexInformer resource schema.GroupResource } // Informer returns the SharedIndexInformer. func (f *genericInformer) Informer() cache.SharedIndexInformer { return f.informer } // Lister returns the GenericLister. 
func (f *genericInformer) Lister() cache.GenericLister {
	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}

// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
	switch resource {
	// Group=kubeless.io, Version=v1beta1
	case v1beta1.SchemeGroupVersion.WithResource("functions"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Kubeless().V1beta1().Functions().Informer()}, nil

	}

	return nil, fmt.Errorf("no informer found for %v", resource)
}

================================================
FILE: pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was automatically generated by informer-gen

package internalinterfaces

import (
	time "time"

	versioned "github.com/kubeless/kubeless/pkg/client/clientset/versioned"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	cache "k8s.io/client-go/tools/cache"
)

// NewInformerFunc constructs a SharedIndexInformer from a clientset and a
// resync period; used by the factory to create informers lazily.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
type SharedInformerFactory interface {
	Start(stopCh <-chan struct{})
	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}

// TweakListOptionsFunc lets callers mutate the ListOptions used by every
// list/watch issued by an informer (e.g. to add a label selector).
type TweakListOptionsFunc func(*v1.ListOptions)

================================================
FILE: pkg/client/informers/externalversions/kubeless/interface.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was automatically generated by informer-gen

package kubeless

import (
	internalinterfaces "github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces"
	v1beta1 "github.com/kubeless/kubeless/pkg/client/informers/externalversions/kubeless/v1beta1"
)

// Interface provides access to each of this group's versions.
type Interface interface {
	// V1beta1 provides access to shared informers for resources in V1beta1.
	V1beta1() v1beta1.Interface
}

// group carries the factory and filters shared by every version accessor.
type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}

================================================
FILE: pkg/client/informers/externalversions/kubeless/v1beta1/function.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was automatically generated by informer-gen

package v1beta1

import (
	time "time"

	kubeless_v1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1"
	versioned "github.com/kubeless/kubeless/pkg/client/clientset/versioned"
	internalinterfaces "github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces"
	v1beta1 "github.com/kubeless/kubeless/pkg/client/listers/kubeless/v1beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	cache "k8s.io/client-go/tools/cache"
)

// FunctionInformer provides access to a shared informer and lister for
// Functions.
type FunctionInformer interface { Informer() cache.SharedIndexInformer Lister() v1beta1.FunctionLister } type functionInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } // NewFunctionInformer constructs a new informer for Function type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewFunctionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { return NewFilteredFunctionInformer(client, namespace, resyncPeriod, indexers, nil) } // NewFilteredFunctionInformer constructs a new informer for Function type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewFilteredFunctionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				// tweakListOptions (when set) lets the caller filter the list request.
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.KubelessV1beta1().Functions(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.KubelessV1beta1().Functions(namespace).Watch(options)
			},
		},
		&kubeless_v1beta1.Function{},
		resyncPeriod,
		indexers,
	)
}

// defaultInformer builds the shared informer used by the factory, adding the
// standard namespace index on top of the stored filters.
func (f *functionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredFunctionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

// Informer returns the shared informer for Functions, creating it via the
// factory on first use.
func (f *functionInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&kubeless_v1beta1.Function{}, f.defaultInformer)
}

// Lister returns a FunctionLister backed by the shared informer's indexer.
func (f *functionInformer) Lister() v1beta1.FunctionLister {
	return v1beta1.NewFunctionLister(f.Informer().GetIndexer())
}

================================================
FILE: pkg/client/informers/externalversions/kubeless/v1beta1/interface.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was automatically generated by informer-gen

package v1beta1

import (
	internalinterfaces "github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// Functions returns a FunctionInformer.
	Functions() FunctionInformer
}

// version carries the factory and filters shared by every informer accessor.
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// Functions returns a FunctionInformer.
func (v *version) Functions() FunctionInformer {
	return &functionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

================================================
FILE: pkg/client/listers/kubeless/v1beta1/expansion_generated.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was automatically generated by lister-gen

package v1beta1

// FunctionListerExpansion allows custom methods to be added to
// FunctionLister.
type FunctionListerExpansion interface{} // FunctionNamespaceListerExpansion allows custom methods to be added to // FunctionNamespaceLister. type FunctionNamespaceListerExpansion interface{} ================================================ FILE: pkg/client/listers/kubeless/v1beta1/function.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was automatically generated by lister-gen package v1beta1 import ( v1beta1 "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) // FunctionLister helps list Functions. type FunctionLister interface { // List lists all Functions in the indexer. List(selector labels.Selector) (ret []*v1beta1.Function, err error) // Functions returns an object that can list and get Functions. Functions(namespace string) FunctionNamespaceLister FunctionListerExpansion } // functionLister implements the FunctionLister interface. type functionLister struct { indexer cache.Indexer } // NewFunctionLister returns a new FunctionLister. func NewFunctionLister(indexer cache.Indexer) FunctionLister { return &functionLister{indexer: indexer} } // List lists all Functions in the indexer. 
func (s *functionLister) List(selector labels.Selector) (ret []*v1beta1.Function, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1beta1.Function))
	})
	return ret, err
}

// Functions returns an object that can list and get Functions.
func (s *functionLister) Functions(namespace string) FunctionNamespaceLister {
	return functionNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// FunctionNamespaceLister helps list and get Functions.
type FunctionNamespaceLister interface {
	// List lists all Functions in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v1beta1.Function, err error)
	// Get retrieves the Function from the indexer for a given namespace and name.
	Get(name string) (*v1beta1.Function, error)
	FunctionNamespaceListerExpansion
}

// functionNamespaceLister implements the FunctionNamespaceLister
// interface.
type functionNamespaceLister struct {
	indexer   cache.Indexer
	namespace string
}

// List lists all Functions in the indexer for a given namespace.
func (s functionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Function, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*v1beta1.Function))
	})
	return ret, err
}

// Get retrieves the Function from the indexer for a given namespace and name.
func (s functionNamespaceLister) Get(name string) (*v1beta1.Function, error) {
	// Indexer keys follow the "namespace/name" convention used by cache.MetaNamespaceKeyFunc.
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1beta1.Resource("function"), name)
	}
	return obj.(*v1beta1.Function), nil
}

================================================
FILE: pkg/controller/function_controller.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"crypto/sha256"
	"fmt"
	"net/url"
	"time"

	monitoringv1alpha1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
	"github.com/sirupsen/logrus"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/api/autoscaling/v2beta1"
	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"

	"github.com/ghodss/yaml"
	kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1"
	"github.com/kubeless/kubeless/pkg/client/clientset/versioned"
	kv1beta1 "github.com/kubeless/kubeless/pkg/client/informers/externalversions/kubeless/v1beta1"
	"github.com/kubeless/kubeless/pkg/langruntime"
	"github.com/kubeless/kubeless/pkg/registry"
	"github.com/kubeless/kubeless/pkg/utils"
)

const (
	// maxRetries is how many times a failed key is requeued before being dropped.
	maxRetries        = 5
	funcKind          = "Function"
	funcAPIVersion    = "kubeless.io/v1beta1"
	// functionFinalizer is added to Function objects so this controller can
	// clean up dependent k8s resources before the object is deleted.
	functionFinalizer = "kubeless.io/function"
)

// FunctionController object
type FunctionController struct {
	logger           *logrus.Entry
	clientset        kubernetes.Interface
	kubelessclient   versioned.Interface
	smclient         *monitoringv1alpha1.MonitoringV1alpha1Client
	Functions        map[string]*kubelessApi.Function
	queue            workqueue.RateLimitingInterface
	informer         cache.SharedIndexInformer
	config           *corev1.ConfigMap
	langRuntime      *langruntime.Langruntimes
	imagePullSecrets []corev1.LocalObjectReference
}

// Config contains k8s client of a controller
type Config struct {
	KubeCli        kubernetes.Interface
	FunctionClient versioned.Interface
}

// NewFunctionController returns a new *FunctionController
// NOTE(review): this constructor calls logrus.Fatalf (process exit) when the
// kubeless configmap cannot be read.
func NewFunctionController(cfg Config, smclient *monitoringv1alpha1.MonitoringV1alpha1Client) *FunctionController {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	apiExtensionsClientset := utils.GetAPIExtensionsClientInCluster()
	config, err := utils.GetKubelessConfig(cfg.KubeCli, apiExtensionsClientset)
	if err != nil {
		logrus.Fatalf("Unable to read the configmap: %s", err)
	}

	// Watch Function objects in the namespace configured by "functions-namespace";
	// resync period 0 disables periodic resyncs.
	informer := kv1beta1.NewFunctionInformer(cfg.FunctionClient, config.Data["functions-namespace"], 0, cache.Indexers{})

	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			if err == nil {
				queue.Add(key)
			}
		},
		UpdateFunc: func(old, new interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(new)
			if err == nil {
				// Only enqueue updates that actually changed the object
				// (functionObjChanged is defined elsewhere in this package).
				newFunctionObj := new.(*kubelessApi.Function)
				oldFunctionObj := old.(*kubelessApi.Function)
				if functionObjChanged(oldFunctionObj, newFunctionObj) {
					queue.Add(key)
				}
			}
		},
		DeleteFunc: func(obj interface{}) {
			// DeletionHandlingMetaNamespaceKeyFunc also handles tombstone objects.
			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
			if err == nil {
				queue.Add(key)
			}
		},
	})

	var lr = langruntime.New(config)
	lr.ReadConfigMap()

	imagePullSecrets := utils.GetSecretsAsLocalObjectReference(config.Data["provision-image-secret"], config.Data["builder-image-secret"])
	if config.Data["enable-build-step"] == "true" {
		imagePullSecrets = append(imagePullSecrets, utils.GetSecretsAsLocalObjectReference("kubeless-registry-credentials")...)
	}

	return &FunctionController{
		logger:           logrus.WithField("pkg", "function-controller"),
		clientset:        cfg.KubeCli,
		smclient:         smclient,
		kubelessclient:   cfg.FunctionClient,
		informer:         informer,
		queue:            queue,
		config:           config,
		langRuntime:      lr,
		imagePullSecrets: imagePullSecrets,
	}
}

// Run starts the kubeless controller
// Blocks until stopCh is closed; returns early if the cache never syncs.
func (c *FunctionController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	c.logger.Info("Starting Function controller")

	go c.informer.Run(stopCh)

	if !cache.WaitForCacheSync(stopCh, c.HasSynced) {
		utilruntime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
		return
	}

	c.logger.Info("Function controller synced and ready")

	wait.Until(c.runWorker, time.Second, stopCh)
}

// HasSynced is required for the cache.Controller interface.
func (c *FunctionController) HasSynced() bool {
	return c.informer.HasSynced()
}

// LastSyncResourceVersion is required for the cache.Controller interface.
func (c *FunctionController) LastSyncResourceVersion() string {
	return c.informer.LastSyncResourceVersion()
}

// runWorker drains the workqueue until it is shut down.
func (c *FunctionController) runWorker() {
	for c.processNextItem() {
		// continue looping
	}
}

// processNextItem handles one queue key and reports whether the worker
// should keep running. Failed keys are retried with rate limiting up to
// maxRetries, then dropped.
func (c *FunctionController) processNextItem() bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)

	err := c.processItem(key.(string))
	if err == nil {
		// No error, reset the ratelimit counters
		c.queue.Forget(key)
	} else if c.queue.NumRequeues(key) < maxRetries {
		c.logger.Errorf("Error processing %s (will retry): %v", key, err)
		c.queue.AddRateLimited(key)
	} else {
		// err != nil and too many retries
		c.logger.Errorf("Error processing %s (giving up): %v", key, err)
		c.queue.Forget(key)
		utilruntime.HandleError(err)
	}

	return true
}

// processItem reconciles a single Function identified by its
// "namespace/name" key: it handles deletion via the finalizer protocol,
// adds the finalizer to new objects, and ensures the backing k8s resources.
func (c *FunctionController) processItem(key string) error {
	c.logger.Infof("Processing change to Function %s", key)

	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}

	obj, exists, err := c.informer.GetIndexer().GetByKey(key)
	if err != nil {
		return fmt.Errorf("Error fetching object with key %s from store: %v", key, err)
	}

	// this is an update when Function API object is actually deleted, we dont need to process anything here
	if !exists {
		c.logger.Infof("Function object %s not found in the cache, ignoring the deletion update", key)
		return nil
	}

	funcObj := obj.(*kubelessApi.Function)

	// Function API object is marked for deletion (DeletionTimestamp != nil), so lets process the delete update
	if funcObj.ObjectMeta.DeletionTimestamp != nil {

		// If finalizer is removed, then we already processed the delete update, so just return
		if !utils.FunctionObjHasFinalizer(funcObj, functionFinalizer) {
			return nil
		}

		// Function object should be deleted, so cleanup the associated resources and remove the finalizer
		err := c.deleteK8sResources(ns, name)
		if err != nil {
			c.logger.Errorf("Can't delete function: %v", err)
			return err
		}

		// remove finalizer from the function object, so that we dont have to process any further and object can be deleted
		err = utils.FunctionObjRemoveFinalizer(c.kubelessclient, funcObj, functionFinalizer)
		if err != nil {
			c.logger.Errorf("Failed to remove function controller as finalizer to Function Obj: %s object due to: %v: ", key, err)
			return err
		}
		c.logger.Infof("Function object %s has been successfully processed and marked for deletion", key)
		return nil
	}

	// If function object in not marked with self as finalizer, then add the finalizer
	if !utils.FunctionObjHasFinalizer(funcObj, functionFinalizer) {
		err = utils.FunctionObjAddFinalizer(c.kubelessclient, funcObj, functionFinalizer)
		if err != nil {
			c.logger.Errorf("Error adding Function controller as finalizer to Function Obj: %s CRD due to: %v: ", key, err)
			return err
		}
	}

	err = c.ensureK8sResources(funcObj)
	if err != nil {
		c.logger.Errorf("Function can not be created/updated: %v", err)
		return err
	}
	c.logger.Infof("Processed change to function: %s", key)
	return nil
}

// startImageBuildJob creates (if necessary) a job that will build an image for the given
function // returns the name of the image, a boolean indicating if the build job has been created and an error func (c *FunctionController) startImageBuildJob(funcObj *kubelessApi.Function, or []metav1.OwnerReference) (string, bool, error) { imagePullSecret, err := c.clientset.CoreV1().Secrets(funcObj.ObjectMeta.Namespace).Get("kubeless-registry-credentials", metav1.GetOptions{}) if err != nil { return "", false, fmt.Errorf("Unable to locate registry credentials to build function image: %v", err) } reg, err := registry.New(*imagePullSecret) if err != nil { return "", false, fmt.Errorf("Unable to retrieve registry information: %v", err) } // Use function content and deps as tag (digested) tag := fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%v%v", funcObj.Spec.Function, funcObj.Spec.Deps)))) imageName := fmt.Sprintf("%s/%s", reg.Creds.Username, funcObj.ObjectMeta.Name) // Check if image already exists exists, err := reg.ImageExists(imageName, tag) if err != nil { return "", false, fmt.Errorf("Unable to check is target image exists: %v", err) } regURL, err := url.Parse(reg.Endpoint) if err != nil { return "", false, fmt.Errorf("Unable to parse registry URL: %v", err) } image := fmt.Sprintf("%s/%s:%s", regURL.Host, imageName, tag) if !exists { tlsVerify := true if c.config.Data["function-registry-tls-verify"] == "false" { tlsVerify = false } err = utils.EnsureFuncImage(c.clientset, funcObj, c.langRuntime, or, imageName, tag, c.config.Data["builder-image"], regURL.Host, imagePullSecret.Name, c.config.Data["provision-image"], tlsVerify, c.imagePullSecrets) if err != nil { return "", false, fmt.Errorf("Unable to create image build job: %v", err) } } else { // Image already exists return image, false, nil } return image, true, nil } // ensureK8sResources creates/updates k8s objects (deploy, svc, configmap) for the function func (c *FunctionController) ensureK8sResources(funcObj *kubelessApi.Function) error { if len(funcObj.ObjectMeta.Labels) == 0 { 
		funcObj.ObjectMeta.Labels = make(map[string]string)
	}
	// Tag all generated resources with the function name.
	funcObj.ObjectMeta.Labels["function"] = funcObj.ObjectMeta.Name

	deployment := appsv1.Deployment{}
	// An optional cluster-wide Deployment template from the controller ConfigMap
	// is merged into the function's own Deployment spec (strict parsing: unknown
	// fields are an error).
	if deploymentConfigData, ok := c.config.Data["deployment"]; ok {
		err := yaml.UnmarshalStrict([]byte(deploymentConfigData), &deployment, yaml.DisallowUnknownFields)
		if err != nil {
			logrus.Errorf("Error parsing Deployment data in ConfigMap kubeless-function-deployment-config: %v", err)
			return err
		}
		err = utils.MergeDeployments(&funcObj.Spec.Deployment, &deployment)
		if err != nil {
			logrus.Errorf(" Error while merging function.Spec.Deployment and Deployment from ConfigMap: %v", err)
			return err
		}
	}

	or, err := utils.GetOwnerReference(funcKind, funcAPIVersion, funcObj.Name, funcObj.UID)
	if err != nil {
		return err
	}

	err = utils.EnsureFuncConfigMap(c.clientset, funcObj, or, c.langRuntime)
	if err != nil {
		return err
	}

	err = utils.EnsureFuncService(c.clientset, funcObj, or)
	if err != nil {
		return err
	}

	// A non-empty container image in the function's Deployment spec means a
	// custom (prebuilt) runtime image.
	prebuiltImage := ""
	if len(funcObj.Spec.Deployment.Spec.Template.Spec.Containers) > 0 && funcObj.Spec.Deployment.Spec.Template.Spec.Containers[0].Image != "" {
		prebuiltImage = funcObj.Spec.Deployment.Spec.Template.Spec.Containers[0].Image
	}
	// Skip image build step if using a custom runtime
	if prebuiltImage == "" {
		if c.config.Data["enable-build-step"] == "true" {
			var isBuilding bool
			prebuiltImage, isBuilding, err = c.startImageBuildJob(funcObj, or)
			if err != nil {
				// NOTE(review): a build failure is only logged, not returned, and the
				// deployment below proceeds with an empty prebuiltImage — confirm this
				// fallback is intentional.
				logrus.Errorf("Unable to build function: %v", err)
			} else {
				if isBuilding {
					logrus.Infof("Started build process for function %s", funcObj.ObjectMeta.Name)
				} else {
					logrus.Infof("Found existing image %s", prebuiltImage)
				}
			}
		}
	} else {
		logrus.Infof("Skipping image-build step for %s", funcObj.ObjectMeta.Name)
	}

	err = utils.EnsureFuncDeployment(c.clientset, funcObj, or, c.langRuntime, prebuiltImage, c.config.Data["provision-image"], c.imagePullSecrets)
	if err != nil {
		return err
	}

	if funcObj.Spec.HorizontalPodAutoscaler.Name != "" &&
		funcObj.Spec.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Name != "" {
		funcObj.Spec.HorizontalPodAutoscaler.OwnerReferences = or
		// NOTE(review): Metrics[0] is indexed without a length check, and
		// *c.smclient is dereferenced without the nil check that deleteAutoscale
		// performs — an HPA spec with an empty Metrics slice (or a controller
		// without a monitoring client) would panic here; confirm these are always set.
		if funcObj.Spec.HorizontalPodAutoscaler.Spec.Metrics[0].Type == v2beta1.ObjectMetricSourceType {
			// A service monitor is needed when the metric is an object
			err = utils.CreateServiceMonitor(*c.smclient, funcObj, funcObj.ObjectMeta.Namespace, or)
			if err != nil {
				return err
			}
		}
		err = utils.CreateAutoscale(c.clientset, funcObj.Spec.HorizontalPodAutoscaler)
		if err != nil && k8sErrors.IsAlreadyExists(err) {
			err = utils.UpdateAutoscale(c.clientset, funcObj.Spec.HorizontalPodAutoscaler)
		}
		if err != nil {
			return err
		}
	} else {
		// HorizontalPodAutoscaler doesn't exists, try to delete if it already existed
		err = c.deleteAutoscale(funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)
		if err != nil && !k8sErrors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

// deleteAutoscale removes the HPA of the named function and, when a monitoring
// client is configured, its ServiceMonitor as well. Missing objects are tolerated.
func (c *FunctionController) deleteAutoscale(ns, name string) error {
	if c.smclient != nil {
		// Delete Service monitor if the client is available
		err := utils.DeleteServiceMonitor(*c.smclient, name, ns)
		if err != nil && !k8sErrors.IsNotFound(err) {
			return err
		}
	}
	// delete autoscale
	err := utils.DeleteAutoscale(c.clientset, name, ns)
	if err != nil && !k8sErrors.IsNotFound(err) {
		return err
	}
	return nil
}

// deleteK8sResources removes k8s objects of the function
func (c *FunctionController) deleteK8sResources(ns, name string) error {
	// delete deployment
	deletePolicy := metav1.DeletePropagationBackground
	err := c.clientset.Extensions().Deployments(ns).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &deletePolicy})
	if err != nil && !k8sErrors.IsNotFound(err) {
		return err
	}
	// delete svc
	err = c.clientset.Core().Services(ns).Delete(name, &metav1.DeleteOptions{})
	if err != nil && !k8sErrors.IsNotFound(err) {
		return err
	}
	// delete cm
	err = c.clientset.Core().ConfigMaps(ns).Delete(name, &metav1.DeleteOptions{})
	if err != nil && !k8sErrors.IsNotFound(err) {
		return err
	}
	// delete service monitor
err = c.deleteAutoscale(ns, name) if err != nil && !k8sErrors.IsNotFound(err) { return err } // delete build job err = c.clientset.BatchV1().Jobs(ns).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: fmt.Sprintf("created-by=kubeless,function=%s", name), }) if err != nil && !k8sErrors.IsNotFound(err) { return err } return nil } func functionObjChanged(oldFunctionObj, newFunctionObj *kubelessApi.Function) bool { // If the function object's deletion timestamp is set, then process if oldFunctionObj.DeletionTimestamp != newFunctionObj.DeletionTimestamp { return true } // If the new and old function object's resource version is same if oldFunctionObj.ResourceVersion == newFunctionObj.ResourceVersion { return false } newSpec := &oldFunctionObj.Spec oldSpec := &newFunctionObj.Spec if newSpec.Function != oldSpec.Function || // compare checksum since the url content type uses Function field to pass the URL for the function // comparing the checksum ensures that if the function code has changed but the URL remains the same, the function will get redeployed newSpec.Checksum != oldSpec.Checksum || newSpec.Handler != oldSpec.Handler || newSpec.FunctionContentType != oldSpec.FunctionContentType || newSpec.Deps != oldSpec.Deps || newSpec.Timeout != oldSpec.Timeout { return true } if !apiequality.Semantic.DeepEqual(newSpec.Deployment, oldSpec.Deployment) || !apiequality.Semantic.DeepEqual(newSpec.HorizontalPodAutoscaler, oldSpec.HorizontalPodAutoscaler) || !apiequality.Semantic.DeepEqual(newSpec.ServiceSpec, oldSpec.ServiceSpec) { return true } return false } ================================================ FILE: pkg/controller/function_controller_test.go ================================================ package controller import ( "reflect" "testing" "github.com/ghodss/yaml" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/langruntime" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" 
	"k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	ktesting "k8s.io/client-go/testing"
)

// findAction returns the first action recorded by the fake clientset that
// matches verb and resource, or nil when none matches.
func findAction(fake *fake.Clientset, verb, resource string) ktesting.Action {
	for _, a := range fake.Actions() {
		if a.Matches(verb, resource) {
			return a
		}
	}
	return nil
}

// hasAction reports whether any recorded action matches verb and resource.
func hasAction(fake *fake.Clientset, verb, resource string) bool {
	return findAction(fake, verb, resource) != nil
}

// TestDeleteK8sResources checks that deleteK8sResources deletes the function's
// deployment, service, configmap and HPA in the right namespace, and tolerates
// resources that are already missing.
func TestDeleteK8sResources(t *testing.T) {
	myNsFoo := metav1.ObjectMeta{
		Namespace: "myns",
		Name:      "foo",
	}
	deploy := appsv1.Deployment{
		ObjectMeta: myNsFoo,
	}
	svc := v1.Service{
		ObjectMeta: myNsFoo,
	}
	cm := v1.ConfigMap{
		ObjectMeta: myNsFoo,
	}
	hpa := v2beta1.HorizontalPodAutoscaler{
		ObjectMeta: myNsFoo,
	}

	clientset := fake.NewSimpleClientset(&deploy, &svc, &cm, &hpa)

	controller := FunctionController{
		clientset: clientset,
	}

	if err := controller.deleteK8sResources("myns", "foo"); err != nil {
		t.Fatalf("Deleting resources returned err: %v", err)
	}

	t.Log("Actions:", clientset.Actions())

	for _, kind := range []string{"services", "configmaps", "deployments", "horizontalpodautoscalers"} {
		a := findAction(clientset, "delete", kind)
		if a == nil {
			t.Errorf("failed to delete %s", kind)
		} else if ns := a.GetNamespace(); ns != "myns" {
			t.Errorf("deleted %s from wrong namespace (%s)", kind, ns)
		} else if n := a.(ktesting.DeleteAction).GetName(); n != "foo" {
			t.Errorf("deleted %s with wrong name (%s)", kind, n)
		}
	}

	// Similar with only svc remaining
	clientset = fake.NewSimpleClientset(&svc)
	controller = FunctionController{
		clientset: clientset,
	}
	if err := controller.deleteK8sResources("myns", "foo"); err != nil {
		t.Fatalf("Deleting partial resources returned err: %v", err)
	}
	t.Log("Actions:", clientset.Actions())
	if !hasAction(clientset, "delete", "services") {
		t.Errorf("failed to delete service")
	}

	clientset = fake.NewSimpleClientset(&deploy, &svc, &cm)
	controller = FunctionController{
		clientset: clientset,
	}
	if err := controller.deleteK8sResources("myns", "foo"); err != nil {
		t.Fatalf("Deleting resources returned err: %v", err)
	}
	t.Log("Actions:", clientset.Actions())
	for _, kind := range []string{"services", "configmaps", "deployments"} {
		a := findAction(clientset, "delete", kind)
		if a == nil {
			t.Errorf("failed to delete %s", kind)
		} else if ns := a.GetNamespace(); ns != "myns" {
			t.Errorf("deleted %s from wrong namespace (%s)", kind, ns)
		}
	}
}

// TestEnsureK8sResourcesWithDeploymentDefinitionFromConfigMap checks that a
// Deployment template supplied through the controller ConfigMap is merged into
// the function's Deployment (annotations merged, function's replica count wins).
func TestEnsureK8sResourcesWithDeploymentDefinitionFromConfigMap(t *testing.T) {
	funcObj := testFunc()
	deploymentConfigData := `{ "metadata": { "annotations": { "foo-from-deploy-cm": "bar-from-deploy-cm", "xyz": "valuefromcm" } }, "spec": { "replicas": 2, "template": { "metadata": { "annotations": { "podannotation-from-func-crd": "value-from-container" } } } } }`
	clientset := fake.NewSimpleClientset()
	controller := testController(clientset, funcObj.Namespace, map[string]string{
		"deployment":     deploymentConfigData,
		"runtime-images": testRuntimeImages(),
	})
	if err := controller.ensureK8sResources(funcObj); err != nil {
		t.Fatalf("Creating/Updating resources returned err: %v", err)
	}
	dpm, _ := clientset.AppsV1().Deployments(funcObj.Namespace).Get(funcObj.Name, metav1.GetOptions{})
	expectedAnnotations := map[string]string{
		"bar":                "foo",
		"foo-from-deploy-cm": "bar-from-deploy-cm",
		"xyz":                "valuefromfunc",
	}
	for i := range expectedAnnotations {
		if dpm.ObjectMeta.Annotations[i] != expectedAnnotations[i] {
			t.Errorf("Expecting annotation %s but received %s", expectedAnnotations[i], dpm.ObjectMeta.Annotations[i])
		}
	}
	if *dpm.Spec.Replicas != 10 {
		t.Fatalf("Expecting replicas as 10 but received : %d", *dpm.Spec.Replicas)
	}
	expectedPodAnnotations := map[string]string{
		"bar":                         "foo",
		"foo-from-deploy-cm":          "bar-from-deploy-cm",
		"xyz":                         "valuefromfunc",
		"podannotation-from-func-crd": "value-from-container",
	}
	for i := range expectedPodAnnotations {
		if dpm.Spec.Template.Annotations[i] != expectedPodAnnotations[i] {
t.Fatalf("Expecting annotation %s but received %s", expectedPodAnnotations[i], dpm.ObjectMeta.Annotations[i]) } } } func TestEnsureK8sResourcesWithDeploymentDefinitionFromConfigMapUnknownKey(t *testing.T) { funcObj := testFunc() deploymentConfigData := `{ "spec": { "template": { "spec": { "unknown": "property" } } } }` controller := testController(fake.NewSimpleClientset(), funcObj.Namespace, map[string]string{ "deployment": deploymentConfigData, "runtime-images": testRuntimeImages(), }) if err := controller.ensureK8sResources(funcObj); err == nil { t.Fatalf("Unknown key in ConfigMap Deployment definition does not fail") } } func TestEnsureK8sResourcesWithLivenessProbeFromConfigMap(t *testing.T) { funcObj := testFunc() runtimeImages := `[ { "ID": "ruby", "depName": "Gemfile", "fileNameSuffix": ".rb", "versions": [ { "name": "ruby24", "version": "2.4", "initImage": "bitnami/ruby:2.4", "imagePullSecrets":[] } ], "livenessProbeInfo":{ "exec": { "command": [ "curl", "-f", "http://localhost:8080/healthz" ], }, "initialDelaySeconds": 5, "periodSeconds": 10 } } ]` clientset := fake.NewSimpleClientset() controller := testController(clientset, funcObj.Namespace, map[string]string{ "runtime-images": runtimeImages, }) if err := controller.ensureK8sResources(funcObj); err != nil { t.Fatalf("Creating/Updating resources returned err: %v", err) } dpm, _ := clientset.AppsV1().Deployments(funcObj.Namespace).Get(funcObj.Name, metav1.GetOptions{}) expectedLivenessProbe := &v1.Probe{ InitialDelaySeconds: int32(5), PeriodSeconds: int32(10), Handler: v1.Handler{ Exec: &v1.ExecAction{ Command: []string{"curl", "-f", "http://localhost:8080/healthz"}, }, }, } if !reflect.DeepEqual(dpm.Spec.Template.Spec.Containers[0].LivenessProbe, expectedLivenessProbe) { t.Fatalf("LivenessProbe found is '%v', although expected was '%v'", dpm.Spec.Template.Spec.Containers[0].LivenessProbe, expectedLivenessProbe) } } func testFunc() *kubelessApi.Function { var replicas int32 replicas = 10 funcAnno := 
map[string]string{ "bar": "foo", "xyz": "valuefromfunc", } return &kubelessApi.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", Labels: map[string]string{"foo": "bar"}, UID: "foo-uid", }, Spec: kubelessApi.FunctionSpec{ Function: "function", Deps: "deps", Handler: "foo.bar", Runtime: "ruby2.4", Deployment: appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Annotations: funcAnno, }, Spec: appsv1.DeploymentSpec{ Replicas: &replicas, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: funcAnno, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Env: []v1.EnvVar{ { Name: "foo", Value: "bar", }, }, }, }, }, }, }, }, }, } } func testRuntimeImages() string { runtimeImages := []langruntime.RuntimeInfo{{ ID: "ruby", DepName: "Gemfile", FileNameSuffix: ".rb", Versions: []langruntime.RuntimeVersion{ { Name: "ruby24", Version: "2.4", Images: []langruntime.Image{ {Phase: "runtime", Image: "bitnami/ruby:2.4"}, }, ImagePullSecrets: []langruntime.ImageSecret{}, }, }, }} out, err := yaml.Marshal(runtimeImages) if err != nil { logrus.Fatal("Canot Marshall runtimeimage") } return string(out) } func testController(clientset kubernetes.Interface, namespace string, configData map[string]string) *FunctionController { kubelessConfigMap := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "kubeless-config", }, Data: configData, } _, err := clientset.CoreV1().ConfigMaps(namespace).Create(kubelessConfigMap) if err != nil { logrus.Fatal("Unable to create configmap") } config, err := clientset.CoreV1().ConfigMaps(namespace).Get("kubeless-config", metav1.GetOptions{}) if err != nil { logrus.Fatal("Unable to read the configmap") } var lr = langruntime.New(config) lr.ReadConfigMap() return &FunctionController{ logger: logrus.WithField("pkg", "controller"), clientset: clientset, langRuntime: lr, config: config, } } ================================================ FILE: pkg/function-image-builder/image_builder.go 
================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "bufio" "fmt" "io/ioutil" "log" "os" "os/exec" lbuilder "github.com/kubeless/kubeless/pkg/function-image-builder/layer-builder" "github.com/spf13/cobra" ) var globalUsage = `` //TODO: add explanation func init() { layerCmd.Flags().Bool("insecure", false, "Disable TLS verification.") layerCmd.Flags().StringP("src", "", "", "Source image reference. F.e. dir://path/to/image") layerCmd.Flags().StringP("src-creds", "", "", "Source image credentials in case it is a private registry. F.e. user:my_pass") layerCmd.Flags().StringP("dst", "", "", "Destination image reference. F.e. docker://user/image") layerCmd.Flags().StringP("dst-creds", "", "", "Destination credentials in case it is a docker registry. F.e. user:my_pass") layerCmd.Flags().StringP("cwd", "", "", "Working directory") } func runCommand(command string, args []string) error { cmd := exec.Command(command, args...) 
stdout, _ := cmd.StdoutPipe() stderr, _ := cmd.StderrPipe() cmd.Start() scannerStdout := bufio.NewScanner(stdout) scannerStdout.Split(bufio.ScanLines) for scannerStdout.Scan() { m := scannerStdout.Text() fmt.Fprintln(os.Stdout, m) } scannerStderr := bufio.NewScanner(stderr) scannerStderr.Split(bufio.ScanLines) for scannerStderr.Scan() { m := scannerStderr.Text() fmt.Fprintln(os.Stderr, m) } return cmd.Wait() } func skopeoCopy(src, dst, srcCreds, dstCreds string, insecure bool) error { command := "skopeo" args := []string{"copy"} if srcCreds != "" { args = append(args, "--src-creds", srcCreds) } if dstCreds != "" { args = append(args, "--dest-creds", dstCreds) } if insecure { args = append(args, "--src-tls-verify=false", "--dest-tls-verify=false") } args = append(args, src, dst) return runCommand(command, args) } var layerCmd = &cobra.Command{ Use: "add-layer FLAG", Short: "Add tar as a image layer", Long: ``, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { log.Fatal("Need exactly one argument - layer tar") } layerTar := args[0] srcImage, err := cmd.Flags().GetString("src") if err != nil { log.Fatal(err) } if srcImage == "" { log.Fatal("Need specify the source image using the flag --src") } dstImage, err := cmd.Flags().GetString("dst") if err != nil { log.Fatal(err) } if dstImage == "" { log.Fatal("Need specify the destination image using the flag --dst") } srcCreds, err := cmd.Flags().GetString("src-creds") if err != nil { log.Fatal(err) } dstCreds, err := cmd.Flags().GetString("dst-creds") if err != nil { log.Fatal(err) } workDir, err := cmd.Flags().GetString("cwd") if err != nil { log.Fatal(err) } if workDir == "" { workDir, err = ioutil.TempDir("", "build") if err != nil { log.Fatal(err) } } insecure, err := cmd.Flags().GetBool("insecure") if err != nil { log.Fatal(err) } // Store src image err = skopeoCopy(srcImage, fmt.Sprintf("dir://%s", workDir), srcCreds, dstCreds, insecure) if err != nil { log.Fatal(err) } log.Println("Succesfully stored 
base image ", srcImage, " at ", workDir) // Add layer err = lbuilder.AddTarToLayer(workDir, layerTar) if err != nil { log.Fatal(err) } log.Println("Added layer ", layerTar, " in ", workDir) // Publish new image err = skopeoCopy(fmt.Sprintf("dir://%s", workDir), dstImage, srcCreds, dstCreds, insecure) if err != nil { log.Fatal(err) } log.Println("Succesfully stored final image at ", dstImage) }, } func newRootCmd() *cobra.Command { cmd := &cobra.Command{ Use: "imbuilder", Short: "Pulls an image and push a new one including a tar file as a new layer", Long: globalUsage, } cmd.AddCommand(layerCmd) return cmd } func main() { cmd := newRootCmd() if err := cmd.Execute(); err != nil { os.Exit(1) } } ================================================ FILE: pkg/function-image-builder/layer-builder/description.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package layerbuilder import ( "crypto/sha256" "encoding/json" "fmt" "io" "io/ioutil" "time" ) // Config represents a container configuration type Config struct { Hostname string Domainname string User string AttachStdin bool AttachStdout bool AttachStderr bool Tty bool OpenStdin bool StdinOnce bool Env []string Cmd []string ArgsEscaped bool Image string Volumes interface{} WorkingDir string Entrypoint interface{} OnBuild interface{} Labels interface{} } // HistoryEntry represents a layer creation info type HistoryEntry struct { Created string `json:"created"` CreatedBy string `json:"created_by,omitifempty"` Comment string `json:"comment,omitifempty"` EmptyLayer bool `json:"empty_layer,omitifempty"` } // Rootfs represents the root filesystem of an image type Rootfs struct { Type string `json:"type"` DiffIds []string `json:"diff_ids"` } // Description represents the specification of a Docker image type Description struct { Arch string `json:"architecture"` Config Config `json:"config"` Container string `json:"container"` ContainerConfig Config `json:"container_config"` Created string `json:"created"` DockerVersion string `json:"docker_version"` History []HistoryEntry `json:"history"` OS string `json:"os"` Rootfs Rootfs `json:"rootfs"` } // New generates a Description object based on the description file func (d *Description) New(descriptionFile io.Reader) error { descriptionContent, err := ioutil.ReadAll(descriptionFile) if err != nil { return err } return json.Unmarshal(descriptionContent, d) } // AddLayer adds a new Layer to the image Description func (d *Description) AddLayer(newLayer *Layer) { // Delete some properties that doesn't apply anymore d.Config.Hostname = "" d.Config.Image = "" d.Container = "" d.ContainerConfig.Hostname = "" d.ContainerConfig.Image = "" // Update new properties d.Created = time.Now().UTC().Format(time.RFC3339) d.History = append(d.History, HistoryEntry{ Created: time.Now().UTC().Format(time.RFC3339), Comment: "Created by Kubeless", 
}) d.Rootfs.DiffIds = append(d.Rootfs.DiffIds, fmt.Sprintf("sha256:%s", newLayer.Sha256)) } // Content returns the description content func (d *Description) Content() ([]byte, error) { return json.Marshal(*d) } // ToLayer returns the Description as a Layer func (d *Description) ToLayer() (*Layer, error) { content, err := d.Content() if err != nil { return nil, err } descriptionNewSize := int64(len(content)) descriptionNewSha := fmt.Sprintf("%x", sha256.Sum256(content)) return &Layer{ Size: descriptionNewSize, Sha256: descriptionNewSha, }, nil } ================================================ FILE: pkg/function-image-builder/layer-builder/description_test.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package layerbuilder import ( "strings" "testing" ) func TestNewDescription(t *testing.T) { descFile := strings.NewReader(`{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"ArgsEscaped":true,"Image":"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"8d2c840a1a9b2544fe713c2e24b6757d52328f09bdfc9c2ef6219afbf7ae6b59","container_config":{"Hostname":"8d2c840a1a9b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) "],"ArgsEscaped":true,"Image":"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2018-02-28T22:14:49.023807051Z","docker_version":"17.06.2-ce","history":[{"created":"2018-02-28T22:14:48.759033366Z","created_by":"/bin/sh -c #(nop) ADD file:327f69fc1ac9a7b6e56e9032f7b8fbd7741dd0b22920761909c6c8e5fa9c5815 in / "},{"created":"2018-02-28T22:14:49.023807051Z","created_by":"/bin/sh -c #(nop) ","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c5183829c43c4698634093dc38f9bee26d1b931dedeba71dbee984f42fe1270d"]}}`) d := Description{} err := d.New(descFile) if err != nil { t.Errorf("Unexpected error %v", err) } } func TestAddLayerDescription(t *testing.T) { descFile := 
strings.NewReader(`{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"ArgsEscaped":true,"Image":"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"8d2c840a1a9b2544fe713c2e24b6757d52328f09bdfc9c2ef6219afbf7ae6b59","container_config":{"Hostname":"8d2c840a1a9b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) "],"ArgsEscaped":true,"Image":"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2018-02-28T22:14:49.023807051Z","docker_version":"17.06.2-ce","history":[{"created":"2018-02-28T22:14:48.759033366Z","created_by":"/bin/sh -c #(nop) ADD file:327f69fc1ac9a7b6e56e9032f7b8fbd7741dd0b22920761909c6c8e5fa9c5815 in / "},{"created":"2018-02-28T22:14:49.023807051Z","created_by":"/bin/sh -c #(nop) ","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c5183829c43c4698634093dc38f9bee26d1b931dedeba71dbee984f42fe1270d"]}}`) d := Description{} err := d.New(descFile) if err != nil { t.Errorf("Unexpected error %v", err) } newLayer := Layer{ Size: 10, Sha256: "abc123", } d.AddLayer(&newLayer) // Last history entry should be the new layer if d.History[len(d.History)-1].Comment != "Created by Kubeless" { t.Errorf("Failed to include new layer: %v", d.History) } // Last rootfs.diff_id should be the new layer if d.Rootfs.DiffIds[len(d.Rootfs.DiffIds)-1] == "abc123" { t.Error("Failed to include new layer") } } func 
TestDescriptionToLayer(t *testing.T) { emptyDesc := Description{} res, err := emptyDesc.ToLayer() if err != nil { t.Fatalf("Unexpected error %v", err) } expectedSize := int64(721) expectedSha := "17263670d4f12e26a270c7ec0a443c3ba8354da1d42f43f8e421634c5965bb6b" if res.Sha256 != expectedSha { t.Errorf("Unexpected sha256 %s", res.Sha256) } if res.Size != expectedSize { t.Errorf("Unexpected size %d", res.Size) } } ================================================ FILE: pkg/function-image-builder/layer-builder/layer.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package layerbuilder import ( "crypto/sha256" "fmt" "io/ioutil" "os" ) // Layer represent the size and checksum of a image layer type Layer struct { Size int64 Sha256 string } // New returns a Layer based on its file func (f *Layer) New(layerFile *os.File) error { // Calculate sha256 fContent, err := ioutil.ReadAll(layerFile) if err != nil { return err } f.Sha256 = fmt.Sprintf("%x", sha256.Sum256(fContent)) // Calculate size fstat, err := layerFile.Stat() if err != nil { return err } f.Size = fstat.Size() return nil } ================================================ FILE: pkg/function-image-builder/layer-builder/layer_builder.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package layerbuilder import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "log" "os" "path" "strings" ) func copyReader(src io.Reader, dst string) error { dstFile, err := os.Create(dst) if err != nil { return err } defer dstFile.Close() _, err = io.Copy(dstFile, src) if err != nil { return err } err = dstFile.Sync() if err != nil { return err } return nil } func copyFile(src, dst string) error { srcFile, err := os.Open(src) if err != nil { return err } defer srcFile.Close() return copyReader(srcFile, dst) } func getLayer(file string) (*Layer, error) { layerFile, err := os.Open(file) if err != nil { return nil, err } defer layerFile.Close() layer := Layer{} err = layer.New(layerFile) if err != nil { return nil, err } return &layer, nil } func saveNewDescription(content []byte, dir, contentChecksum string) error { dLayerFile := path.Join(dir, contentChecksum) return copyReader(bytes.NewReader(content), dLayerFile) } func updateDescription(descriptionDir string, descriptionFile *os.File, newLayer *Layer) (*Description, error) { d := Description{} err := d.New(descriptionFile) if err != nil { return nil, fmt.Errorf("Unable to parse image description: %v", err) } d.AddLayer(newLayer) if err != nil { return nil, fmt.Errorf("Unable to update image description: %v", err) } return &d, nil } // AddTarToLayer copies a tar file into a image directory and update its metadata func AddTarToLayer(imageDir, tarFile string) error { tarLayer, err := getLayer(tarFile) if err != nil { return err } destFile := path.Join(imageDir, tarLayer.Sha256) err = copyFile(tarFile, destFile) if err != nil { 
return fmt.Errorf("Failed to copy tar file: %v", err) } log.Printf("Copied source %s to %s", tarFile, destFile) // Parse manifest manifestPath := path.Join(imageDir, "manifest.json") manifestFile, err := os.Open(manifestPath) if err != nil { return err } m := Manifest{} err = m.New(manifestFile) if err != nil { return fmt.Errorf("Failed to parse image manifest: %v", err) } log.Printf("Parsed manifest") // Update description descriptionPath := path.Join(imageDir, strings.Replace(m.Config.Digest, "sha256:", "", -1)) descriptionFile, err := os.Open(descriptionPath) if err != nil { return err } description, err := updateDescription(imageDir, descriptionFile, tarLayer) if err != nil { return err } descriptionLayer, err := description.ToLayer() if err != nil { return fmt.Errorf("Unable to generate layer from description: %v", err) } descriptionContent, err := description.Content() if err != nil { return err } err = saveNewDescription(descriptionContent, imageDir, descriptionLayer.Sha256) if err != nil { return err } log.Printf("Added layer to description at %s", descriptionLayer.Sha256) // Update manifest m.UpdateConfig(descriptionLayer) m.AddLayer(tarLayer) mBytes, err := json.Marshal(m) if err != nil { return err } err = ioutil.WriteFile(manifestPath, mBytes, 0644) if err != nil { return err } log.Printf("Updated manifest") return nil } ================================================ FILE: pkg/function-image-builder/layer-builder/layer_test.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package layerbuilder import ( "io/ioutil" "os" "testing" ) func TestNewLayer(t *testing.T) { f, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) f.WriteString("test content") layer := Layer{} err = layer.New(f) if err != nil { t.Fatalf("Unexpected error %v", err) } if layer.Sha256 != "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { t.Errorf("Wrong sha, expecting patata, received %s", layer.Sha256) } if layer.Size != 12 { t.Errorf("Wrong size, expecting patata, received %d", layer.Size) } } ================================================ FILE: pkg/function-image-builder/layer-builder/manifest.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package layerbuilder import ( "encoding/json" "fmt" "io" "io/ioutil" ) type layer struct { MediaType string `json:"mediaType"` Size int64 `json:"size"` Digest string `json:"digest"` } // Manifest represent the manifest.json of an image type Manifest struct { SchemaVersion int `json:"schemaVersion"` MediaType string `json:"mediaType"` Config layer `json:"config"` Layers []layer `json:"layers"` } // New parses an io.Reader into a Manifest func (m *Manifest) New(manifestFile io.Reader) error { manifestContent, err := ioutil.ReadAll(manifestFile) if err != nil { return err } err = json.Unmarshal(manifestContent, m) if err != nil { return nil } return nil } // UpdateConfig overrides the Config information of the manifest with a new Layer func (m *Manifest) UpdateConfig(newConfig *Layer) { m.Config.Size = int64(newConfig.Size) m.Config.Digest = fmt.Sprintf("sha256:%s", newConfig.Sha256) } // AddLayer adds a new layer to the list in the Manifest func (m *Manifest) AddLayer(newLayer *Layer) { m.Layers = append(m.Layers, layer{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: newLayer.Size, Digest: fmt.Sprintf("sha256:%s", newLayer.Sha256), }) } ================================================ FILE: pkg/function-image-builder/layer-builder/manifest_test.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package layerbuilder import ( "strings" "testing" ) func TestNewManifest(t *testing.T) { manifestFile := strings.NewReader(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","size":1489,"digest":"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d"},"layers":[{"mediaType":"application/vnd.docker.image.rootfs.diff.tar.gzip","size":723113,"digest":"sha256:d070b8ef96fc4f2d92ff520a4fe55594e362b4e1076a32bbfeb261dc03322910"}]}`) m := Manifest{} err := m.New(manifestFile) if err != nil { t.Errorf("Unexpected error %v", err) } if m.Config.Size != 1489 { t.Errorf("Unexpected size %d", m.Config.Size) } if m.Config.Digest != "sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d" { t.Errorf("Unexpected digest %s", m.Config.Digest) } if len(m.Layers) != 1 { t.Errorf("Unexpected layers length %d", len(m.Layers)) } } func TestAddNewLayer(t *testing.T) { manifestFile := strings.NewReader(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","size":1489,"digest":"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d"},"layers":[{"mediaType":"application/vnd.docker.image.rootfs.diff.tar.gzip","size":723113,"digest":"sha256:d070b8ef96fc4f2d92ff520a4fe55594e362b4e1076a32bbfeb261dc03322910"}]}`) m := Manifest{} err := m.New(manifestFile) if err != nil { t.Errorf("Unexpected error %v", err) } m.AddLayer(&Layer{ Size: 10, Sha256: "Test", }) if len(m.Layers) != 2 { t.Errorf("Unexpected layers length %d", len(m.Layers)) } if m.Layers[1].Size != 10 && m.Layers[1].Digest != "Test" { t.Errorf("Unexpected layer %v", m.Layers[1]) } } func TestUpdateConfig(t *testing.T) { manifestFile := 
strings.NewReader(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","size":1489,"digest":"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d"},"layers":[{"mediaType":"application/vnd.docker.image.rootfs.diff.tar.gzip","size":723113,"digest":"sha256:d070b8ef96fc4f2d92ff520a4fe55594e362b4e1076a32bbfeb261dc03322910"}]}`) m := Manifest{} err := m.New(manifestFile) if err != nil { t.Errorf("Unexpected error %v", err) } m.UpdateConfig(&Layer{ Size: 10, Sha256: "Test", }) if m.Config.Size != 10 && m.Config.Digest != "Test" { t.Errorf("Unexpected layer %v", m.Config) } } ================================================ FILE: pkg/function-proxy/Gopkg.toml ================================================ [[constraint]] name = "github.com/prometheus/client_golang" revision = "f504d69affe11ec1ccb2e5948127f86878c9fd57" ================================================ FILE: pkg/function-proxy/proxy.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package main import ( "golang.org/x/net/context" "io/ioutil" "log" "net/http" "os" "os/exec" "github.com/kubeless/kubeless/pkg/function-proxy/utils" "github.com/prometheus/client_golang/prometheus/promhttp" ) func copyHeaders(dst, src http.Header) { for k, vv := range src { for _, v := range vv { dst.Add(k, v) } } } func handle(ctx context.Context, w http.ResponseWriter, r *http.Request) ([]byte, error) { client := &http.Client{} req, err := http.NewRequest(r.Method, "http://localhost:8090", r.Body) if err != nil { return []byte{}, err } copyHeaders(req.Header, r.Header) req.ContentLength = r.ContentLength response, err := client.Do(req) if err != nil { return []byte{}, err } return ioutil.ReadAll(response.Body) } func handler(w http.ResponseWriter, r *http.Request) { utils.Handler(w, r, handle) } func health(w http.ResponseWriter, r *http.Request) { rr, err := http.Get("http://localhost:8090/healthz") res, _ := ioutil.ReadAll(rr.Body) log.Println(string(res)) if err != nil { log.Fatalln("localhost:8090 not responding") w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Internal Server error")) } else { w.Write([]byte("OK")) } } func startNativeDaemon() { args := os.Getenv("FUNC_PROCESS") cmd := exec.Command("/bin/sh", "-c", args) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Run() if err != nil { log.Fatalf("Unable to run %s. 
Received %v", args, err) } } func main() { go startNativeDaemon() mux := http.NewServeMux() mux.HandleFunc("/", handler) mux.HandleFunc("/healthz", health) mux.Handle("/metrics", promhttp.Handler()) server := utils.NewServer(mux) go func() { if err := server.ListenAndServe(); err != http.ErrServerClosed { panic(err) } }() utils.GracefulShutdown(server) } ================================================ FILE: pkg/function-proxy/utils/proxy-utils.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package utils import ( "fmt" "golang.org/x/net/context" "log" "net/http" "os" "os/signal" "strconv" "syscall" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) var ( timeout = os.Getenv("FUNC_TIMEOUT") funcPort = os.Getenv("FUNC_PORT") shutdownTimeout = os.Getenv("SHUTDOWN_TIMEOUT") intTimeout int intShutdownTimeout int funcHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "function_duration_seconds", Help: "Duration of user function in seconds", }, []string{"method"}) funcCalls = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "function_calls_total", Help: "Number of calls to user function", }, []string{"method"}) funcErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "function_failures_total", Help: "Number of exceptions in user function", }, []string{"method"}) ) // PromHTTPHandler to expose the metrics, invoked in the golang runtime func PromHTTPHandler() http.Handler { return promhttp.Handler() } func init() { if timeout == "" { timeout = "180" } if funcPort == "" { funcPort = "8080" } if shutdownTimeout == "" { shutdownTimeout = "10" } var err error intTimeout, err = strconv.Atoi(timeout) if err != nil { panic(err) } intShutdownTimeout, err = strconv.Atoi(shutdownTimeout) if err != nil { panic(err) } prometheus.MustRegister(funcHistogram, funcCalls, funcErrors) } // Logging Functions, required to expose statusCode property type loggingResponseWriter struct { http.ResponseWriter statusCode int } func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter { return &loggingResponseWriter{w, http.StatusOK} } func (lrw *loggingResponseWriter) WriteHeader(code int) { lrw.statusCode = code lrw.ResponseWriter.WriteHeader(code) } func logReq(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { lrw := newLoggingResponseWriter(w) handler.ServeHTTP(lrw, r) log.Printf("%s \"%s %s %s\" %d %s", 
r.RemoteAddr, r.Method, r.RequestURI, r.Proto, lrw.statusCode, r.UserAgent()) if lrw.statusCode == 408 { go func() { // Give time to return timeout response time.Sleep(time.Second) log.Fatal("Request timeout. Forcing exit") }() } }) } func copyHeaders(dst, src http.Header) { for k, vv := range src { for _, v := range vv { dst.Add(k, v) } } } // Handle type receive the context elements of a HTTP request to process it type Handle func(ctx context.Context, w http.ResponseWriter, r *http.Request) ([]byte, error) // Handler receives an HTTP request and response and a handler function // It manages timeouts and prometheus metrics func Handler(w http.ResponseWriter, r *http.Request, h Handle) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(intTimeout)*time.Second) defer cancel() funcChannel := make(chan struct { res string err error }, 1) go func() { funcCalls.With(prometheus.Labels{"method": r.Method}).Inc() start := time.Now() res, err := h(ctx, w, r) funcHistogram.With(prometheus.Labels{"method": r.Method}).Observe(time.Since(start).Seconds()) pack := struct { res string err error }{string(res), err} funcChannel <- pack }() select { case respPack := <-funcChannel: if respPack.err != nil { funcErrors.With(prometheus.Labels{"method": r.Method}).Inc() w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(fmt.Sprintf("Error: %v", respPack.err))) } else { w.Write([]byte(respPack.res)) } // Send Timeout response case <-ctx.Done(): funcErrors.With(prometheus.Labels{"method": r.Method}).Inc() w.WriteHeader(http.StatusRequestTimeout) w.Write([]byte("Timeout exceeded")) } } // NewServer returns an HTTP server ready to listen on the configured port // and with logReq mixed in for logging. func NewServer(mux *http.ServeMux) *http.Server { return &http.Server{Addr: fmt.Sprintf(":%s", funcPort), Handler: logReq(mux)} } // GracefulShutdown accepts a server reference and triggers a graceful shutdown // for it when either SIGINT or SIGTERM is received. 
func GracefulShutdown(server *http.Server) { stop := make(chan os.Signal, 1) signal.Notify(stop, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) <-stop timeoutDuration := time.Duration(intShutdownTimeout) * time.Second ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) defer cancel() log.Printf("Shuting down with timeout: %s\n", timeoutDuration) if err := server.Shutdown(ctx); err != nil { log.Printf("Error: %v\n", err) } else { log.Println("Server stopped") } } ================================================ FILE: pkg/functions/params.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package functions import ( "golang.org/x/net/context" "net/http" ) // Extension includes a reference to the Event request and its Context (to handle timeouts) type Extension struct { Request *http.Request Response http.ResponseWriter Context context.Context } // Event includes information about the event source type Event struct { Data string EventID string EventType string EventTime string EventNamespace string Extensions Extension } // Context includes information about the function environment type Context struct { FunctionName string Timeout string Runtime string MemoryLimit string } ================================================ FILE: pkg/langruntime/langruntime.go ================================================ package langruntime import ( "fmt" "os" "path" "regexp" "strings" "github.com/ghodss/yaml" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" ) const ( // PhaseInstallation - Installation phase name PhaseInstallation = "installation" // PhaseCompilation - Compilation phase name PhaseCompilation = "compilation" // PhaseRuntime - Runtime phase name PhaseRuntime = "runtime" ) // Langruntimes struct for getting configmap type Langruntimes struct { kubelessConfig *v1.ConfigMap AvailableRuntimes []RuntimeInfo } // Image represents the information about a runtime phase type Image struct { Phase string `yaml:"phase"` Image string `yaml:"image"` Command string `yaml:"command,omitempty"` Env map[string]string `yaml:"env,omitempty"` Secrets []Secret `yaml:"secrets,omitempty"` } // Secret is a reference to a secret. 
type Secret struct { Name string `yaml:"name,omitempty"` } // RuntimeVersion is a struct with all the info about the images and secrets type RuntimeVersion struct { Name string `yaml:"name"` Version string `yaml:"version"` Images []Image `yaml:"runtimeImage"` ImagePullSecrets []ImageSecret `yaml:"imagePullSecrets,omitempty"` } // ImageSecret for pulling the image type ImageSecret struct { ImageSecret string `yaml:"imageSecret,omitempty"` } // RuntimeInfo describe the runtime specifics (typical file suffix and dependency file name) // and the supported versions type RuntimeInfo struct { ID string `yaml:"ID"` Versions []RuntimeVersion `yaml:"versions"` LivenessProbeInfo *v1.Probe `yaml:"livenessProbeInfo,omitempty"` DepName string `yaml:"depName"` FileNameSuffix string `yaml:"fileNameSuffix"` } // New initializes a langruntime object func New(config *v1.ConfigMap) *Langruntimes { var ri []RuntimeInfo return &Langruntimes{ kubelessConfig: config, AvailableRuntimes: ri, } } // ReadConfigMap reads the configmap func (l *Langruntimes) ReadConfigMap() { if runtimeImages, ok := l.kubelessConfig.Data["runtime-images"]; ok { err := yaml.Unmarshal([]byte(runtimeImages), &l.AvailableRuntimes) if err != nil { logrus.Fatalf("Unable to get the runtime images: %v", err) } } } // GetRuntimes returns the list of available runtimes as strings func (l *Langruntimes) GetRuntimes() []string { result := []string{} for _, runtimeInf := range l.AvailableRuntimes { for _, runtime := range runtimeInf.Versions { result = append(result, runtimeInf.ID+runtime.Version) } } return result } // IsValidRuntime returns true if passed runtime name is valid runtime func (l *Langruntimes) IsValidRuntime(runtime string) bool { for _, validRuntime := range l.GetRuntimes() { if runtime == validRuntime { return true } } return false } func (l *Langruntimes) getAvailableRuntimesPerTrigger(imageType string) []string { var runtimeList []string for i := range l.AvailableRuntimes { for j := range 
l.AvailableRuntimes[i].Versions { if l.findImage(PhaseRuntime, l.AvailableRuntimes[i].Versions[j]) != nil { runtimeList = append(runtimeList, l.AvailableRuntimes[i].ID+l.AvailableRuntimes[i].Versions[j].Version) } } } return runtimeList } // extract the branch number from the runtime string func (l *Langruntimes) getVersionFromRuntime(runtime string) string { re := regexp.MustCompile("[0-9.]+$") return re.FindString(runtime) } // GetRuntimeInfo returns all the info regarding a runtime func (l *Langruntimes) GetRuntimeInfo(runtime string) (RuntimeInfo, error) { runtimeID := regexp.MustCompile("^[a-zA-Z_-]+").FindString(runtime) for _, runtimeInf := range l.AvailableRuntimes { if runtimeInf.ID == runtimeID { return runtimeInf, nil } } return RuntimeInfo{}, fmt.Errorf("Unable to find %s as runtime", runtime) } // GetLivenessProbeInfo returs the liveness probe info regarding a runtime func (l *Langruntimes) GetLivenessProbeInfo(runtime string, port int) *v1.Probe { livenessProbe := &v1.Probe{ InitialDelaySeconds: int32(3), PeriodSeconds: int32(30), Handler: v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: "/healthz", Port: intstr.FromInt(port), }, }, } runtimeID := regexp.MustCompile("^[a-zA-Z]+").FindString(runtime) for _, runtimeInf := range l.AvailableRuntimes { if runtimeInf.ID == runtimeID { if runtimeInf.LivenessProbeInfo != nil { return runtimeInf.LivenessProbeInfo } return livenessProbe } } return livenessProbe } func (l *Langruntimes) findRuntimeVersion(runtimeWithVersion string) (RuntimeVersion, error) { version := l.getVersionFromRuntime(runtimeWithVersion) runtimeInf, err := l.GetRuntimeInfo(runtimeWithVersion) if err != nil { return RuntimeVersion{}, err } for _, versionInf := range runtimeInf.Versions { if versionInf.Version == version { return versionInf, nil } } return RuntimeVersion{}, fmt.Errorf("The given runtime and version %s is not valid", runtimeWithVersion) } // Returns the image information of a phase or null if the phase is not found func (l 
*Langruntimes) findImage(phase string, runtime RuntimeVersion) *Image { for _, imageInf := range runtime.Images { if imageInf.Phase == phase { return &imageInf } } return nil } // GetFunctionImage returns the image ID depending on the runtime, its version and function type func (l *Langruntimes) GetFunctionImage(runtime string) (string, error) { runtimeInf, err := l.GetRuntimeInfo(runtime) if err != nil { return "", err } imageNameEnvVar := strings.ToUpper(runtimeInf.ID) + l.getVersionFromRuntime(runtime) + "_RUNTIME" imageName := os.Getenv(imageNameEnvVar) if imageName == "" { versionInf, err := l.findRuntimeVersion(runtime) if err != nil { return "", err } runtimeImage := l.findImage(PhaseRuntime, versionInf) if runtimeImage == nil { err = fmt.Errorf("The given runtime and version '%s' does not have a valid image for HTTP based functions. Available runtimes are: %s", runtime, strings.Join(l.getAvailableRuntimesPerTrigger("HTTP")[:], ", ")) } else { imageName = runtimeImage.Image } } return imageName, nil } // GetImageSecrets gets the secrets to pull the runtime image func (l *Langruntimes) GetImageSecrets(runtime string) ([]v1.LocalObjectReference, error) { var secrets []string runtimeInf, err := l.findRuntimeVersion(runtime) if err != nil { return []v1.LocalObjectReference{}, err } if len(runtimeInf.ImagePullSecrets) == 0 { return []v1.LocalObjectReference{}, nil } for _, s := range runtimeInf.ImagePullSecrets { secrets = append(secrets, s.ImageSecret) } var lors []v1.LocalObjectReference if len(secrets) > 0 { for _, s := range secrets { lor := v1.LocalObjectReference{Name: s} lors = append(lors, lor) } } return lors, nil } // GetInitContainerSecrets gets the secrets of the init container with name func (l *Langruntimes) GetInitContainerSecrets(runtime, name string) ([]v1.LocalObjectReference, error) { runtimeInf, err := l.findRuntimeVersion(runtime) if err != nil { return nil, err } if len(runtimeInf.Images) == 0 { return nil, nil } var secrets []Secret phase 
:= name2phase(name) for _, i := range runtimeInf.Images { if i.Phase == phase { secrets = append(secrets, i.Secrets...) break } } var refs []v1.LocalObjectReference for _, s := range secrets { refs = append(refs, v1.LocalObjectReference{Name: s.Name}) } return refs, nil } func appendToCommand(orig string, command ...string) string { if len(orig) > 0 { return fmt.Sprintf("%s && %s", orig, strings.Join(command, " && ")) } return strings.Join(command, " && ") } func parseEnv(env map[string]string) []v1.EnvVar { res := []v1.EnvVar{} for key, value := range env { res = append(res, v1.EnvVar{Name: key, Value: value}) } return res } // GetBuildContainer returns a Container definition based on a runtime func (l *Langruntimes) GetBuildContainer(runtime, depsChecksum string, env []v1.EnvVar, installVolume v1.VolumeMount, resources v1.ResourceRequirements) (v1.Container, error) { runtimeInf, err := l.GetRuntimeInfo(runtime) if err != nil { return v1.Container{}, err } depsFile := path.Join(installVolume.MountPath, runtimeInf.DepName) versionInf, err := l.findRuntimeVersion(runtime) if err != nil { return v1.Container{}, err } imageInf := l.findImage(PhaseInstallation, versionInf) if imageInf == nil { // The runtime doesn't have an installation hook return v1.Container{}, nil } var command string // Validate deps checksum shaFile := "/tmp/deps.sha256" // if checksum exist, check sum if depsChecksum != "" { command = appendToCommand(command, fmt.Sprintf("echo '%s %s' > %s", depsChecksum, depsFile, shaFile), fmt.Sprintf("sha256sum -c %s", shaFile), imageInf.Command, ) } else { command = appendToCommand(command, imageInf.Command) } env = append( env, v1.EnvVar{Name: "KUBELESS_INSTALL_VOLUME", Value: installVolume.MountPath}, v1.EnvVar{Name: "KUBELESS_DEPS_FILE", Value: depsFile}, ) env = append(env, parseEnv(imageInf.Env)...) 
return v1.Container{ Name: "install", Image: imageInf.Image, Command: []string{"sh", "-c"}, Args: []string{command}, VolumeMounts: []v1.VolumeMount{installVolume}, ImagePullPolicy: v1.PullIfNotPresent, WorkingDir: installVolume.MountPath, Env: env, Resources: resources, }, nil } // UpdateDeployment object in case of custom runtime func (l *Langruntimes) UpdateDeployment(dpm *appsv1.Deployment, volPath, runtime string) { versionInf, err := l.findRuntimeVersion(runtime) if err != nil { // Not found an image for the given runtime return } dpm.Spec.Template.Spec.Containers[0].Env = append( dpm.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "KUBELESS_INSTALL_VOLUME", Value: volPath}, ) imageInf := l.findImage(PhaseRuntime, versionInf) if imageInf == nil { // Not found an image for the given runtime return } dpm.Spec.Template.Spec.Containers[0].Env = append( dpm.Spec.Template.Spec.Containers[0].Env, parseEnv(imageInf.Env)..., ) } // GetCompilationContainer returns a Container definition based on a runtime func (l *Langruntimes) GetCompilationContainer(runtime, funcName string, env []v1.EnvVar, installVolume v1.VolumeMount, resources v1.ResourceRequirements) (*v1.Container, error) { versionInf, err := l.findRuntimeVersion(runtime) if err != nil { return nil, err } imageInf := l.findImage(PhaseCompilation, versionInf) if imageInf == nil { // The runtime doesn't have a compilation hook return nil, nil } env = append( env, v1.EnvVar{Name: "KUBELESS_INSTALL_VOLUME", Value: installVolume.MountPath}, v1.EnvVar{Name: "KUBELESS_FUNC_NAME", Value: funcName}, ) env = append(env, parseEnv(imageInf.Env)...) 
return &v1.Container{ Name: "compile", Image: imageInf.Image, Command: []string{"sh", "-c"}, Args: []string{imageInf.Command}, Env: env, VolumeMounts: []v1.VolumeMount{installVolume}, ImagePullPolicy: v1.PullIfNotPresent, WorkingDir: installVolume.MountPath, Resources: resources, }, nil } // name2phase returns the phase of an init container func name2phase(name string) string { switch name { case "compile": return PhaseCompilation case "install": return PhaseInstallation } return name } ================================================ FILE: pkg/langruntime/langruntime_test.go ================================================ package langruntime import ( "os" "reflect" "regexp" "strings" "testing" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/kubernetes/fake" ) var clientset = fake.NewSimpleClientset() func TestMain(m *testing.M) { AddFakeConfig(clientset) os.Exit(m.Run()) } func check(clientset *fake.Clientset, lr *Langruntimes, runtime, fname string, values []string, t *testing.T) { info, err := lr.GetRuntimeInfo(runtime) if err != nil { t.Fatal(err) } if info.DepName != values[0] { t.Fatalf("Retrieving the image returned a wrong dependencies file. Received " + info.DepName + " while expecting " + values[0]) } if fname+info.FileNameSuffix != values[1] { t.Fatalf("Retrieving the image returned a wrong file name. 
Received " + fname + info.FileNameSuffix + " while expecting " + values[1]) } } func TestGetFunctionFileNames(t *testing.T) { lr := SetupLangRuntime(clientset) lr.ReadConfigMap() expectedValues := []string{"requirements.txt", "test.py"} check(clientset, lr, "python2.7", "test", expectedValues, t) } func TestGetFunctionImage(t *testing.T) { lr := SetupLangRuntime(clientset) lr.ReadConfigMap() // Throws an error if the runtime doesn't exist _, err := lr.GetFunctionImage("unexistent") if err == nil { t.Fatalf("Retrieving data for 'unexistent' should return an error") } // Throws an error if the runtime version doesn't exist _, err = lr.GetFunctionImage("python10") expectedErrMsg := regexp.MustCompile("The given runtime and version python10 is not valid") if expectedErrMsg.FindString(err.Error()) == "" { t.Fatalf("Retrieving data for 'python10' should return an error. Received: %s", err) } expectedImageName := "ruby-test-image" os.Setenv("PYTHON2.7_RUNTIME", expectedImageName) imageR, errR := lr.GetFunctionImage("python2.7") if errR != nil { t.Errorf("Retrieving the image returned err: %v", errR) } if imageR != expectedImageName { t.Errorf("Expecting " + imageR + " to be set to " + expectedImageName) } os.Unsetenv("PYTHON2.7_RUNTIME") } func TestGetLivenessProbe(t *testing.T) { lr := SetupLangRuntime(clientset) lr.ReadConfigMap() livenessProbe := lr.GetLivenessProbeInfo("python", 8080) expectedLivenessProbe := &v1.Probe{ InitialDelaySeconds: int32(5), PeriodSeconds: int32(10), Handler: v1.Handler{ Exec: &v1.ExecAction{ Command: []string{"curl", "-f", "http://localhost:8080/healthz"}, }, }, } if !reflect.DeepEqual(livenessProbe, expectedLivenessProbe) { t.Fatalf("Expected livenessProbeInfo to be %v, but found %v", expectedLivenessProbe, livenessProbe) } } func TestGetRuntimes(t *testing.T) { lr := SetupLangRuntime(clientset) lr.ReadConfigMap() runtimes := strings.Join(lr.GetRuntimes(), ", ") expectedRuntimes := "python2.7" if runtimes != expectedRuntimes { 
t.Errorf("Expected %s but got %s", expectedRuntimes, runtimes) } } func TestGetBuildContainer(t *testing.T) { lr := SetupLangRuntime(clientset) lr.ReadConfigMap() // It should throw an error if there is not an image available _, err := lr.GetBuildContainer("notExists", "", []v1.EnvVar{}, v1.VolumeMount{}, v1.ResourceRequirements{}) if err == nil { t.Error("Expected to throw an error") } // It should return the proper build image for python vol1 := v1.VolumeMount{Name: "v1", MountPath: "/v1"} resources := v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse("100m")}} c, err := lr.GetBuildContainer("python2.7", "abc123", []v1.EnvVar{}, vol1, resources) if err != nil { t.Errorf("Unexpected error: %s", err) } expectedContainer := v1.Container{ Name: "install", Image: "python:2.7", Command: []string{"sh", "-c"}, Args: []string{"echo 'abc123 /v1/requirements.txt' > /tmp/deps.sha256 && sha256sum -c /tmp/deps.sha256 && foo"}, VolumeMounts: []v1.VolumeMount{vol1}, WorkingDir: "/v1", ImagePullPolicy: v1.PullIfNotPresent, Env: []v1.EnvVar{ {Name: "KUBELESS_INSTALL_VOLUME", Value: "/v1"}, {Name: "KUBELESS_DEPS_FILE", Value: "/v1/requirements.txt"}, }, Resources: v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse("100m")}}, } if !reflect.DeepEqual(expectedContainer, c) { t.Errorf("Unexpected result. 
Expecting:\n %+v\nReceived:\n %+v", expectedContainer, c) } } func TestGetBuildContainerWithBundledDeps(t *testing.T) { lr := SetupLangRuntime(clientset) lr.ReadConfigMap() // It should return the proper build image for python vol1 := v1.VolumeMount{Name: "v1", MountPath: "/v1"} resources := v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse("100m")}} c, err := lr.GetBuildContainer("python2.7", "", []v1.EnvVar{}, vol1, resources) if err != nil { t.Errorf("Unexpected error: %s", err) } expectedContainer := v1.Container{ Name: "install", Image: "python:2.7", Command: []string{"sh", "-c"}, Args: []string{"foo"}, VolumeMounts: []v1.VolumeMount{vol1}, WorkingDir: "/v1", ImagePullPolicy: v1.PullIfNotPresent, Env: []v1.EnvVar{ {Name: "KUBELESS_INSTALL_VOLUME", Value: "/v1"}, {Name: "KUBELESS_DEPS_FILE", Value: "/v1/requirements.txt"}, }, Resources: v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse("100m")}}, } if !reflect.DeepEqual(expectedContainer, c) { t.Errorf("Unexpected result. Expecting:\n %+v\nReceived:\n %+v", expectedContainer, c) } } ================================================ FILE: pkg/langruntime/langruntimetestutils.go ================================================ package langruntime import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/sirupsen/logrus" "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" ) // AddFakeConfig initializes configmap for unit tests with fake configuration. 
func AddFakeConfig(clientset *fake.Clientset) { runtimeImages := `[ { "ID": "python", "compiled": false, "depName": "requirements.txt", "fileNameSuffix": ".py", "livenessProbeInfo": { "exec": { "command": ["curl", "-f", "http://localhost:8080/healthz"] }, "initialDelaySeconds": 5, "periodseconds": 10 }, "versions": [ { "images": [ { "command": "foo", "image": "python:2.7", "phase": "installation", "secrets": [{"name": "my-secret"}] }, { "image": "bar", "phase": "runtime", "env": {"PYTHONPATH": "/kubeless/lib/python2.7/site-packages:/kubeless"} } ], "name": "python27", "version": "2.7", "imagePullSecrets": [{"ImageSecret": "p1"}, {"ImageSecret": "p2"}] } ] } ]` cm := v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "kubeless-config", Namespace: "kubeless", }, Data: map[string]string{ "runtime-images": runtimeImages, }, } _, err := clientset.CoreV1().ConfigMaps("kubeless").Create(&cm) if err != nil { logrus.Fatal("Unable to create configmap") } } // SetupLangRuntime Sets up Langruntime struct func SetupLangRuntime(clientset *fake.Clientset) *Langruntimes { config, err := clientset.CoreV1().ConfigMaps("kubeless").Get("kubeless-config", metav1.GetOptions{}) if err != nil { logrus.Fatal("Unable to read the configmap") } var lr = New(config) return lr } ================================================ FILE: pkg/registry/registry.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package registry import ( "encoding/json" "fmt" "io/ioutil" "net/http" "reflect" "regexp" "time" "k8s.io/api/core/v1" ) // Credentials represent the required credentials to authenticate against a Docker registry type Credentials struct { Username string `json:"username"` Password string `json:"password"` Email string `json:"email,omitifempty"` Auth string `json:"auth,omitifempty"` } // Registry struct represents a Docker Registry type Registry struct { Endpoint string Version string Creds Credentials } type tagv1 struct { Layer string `json:"layer"` Name string `json:"name"` } type tagListV2 struct { Name string `json:"name"` Tags []string `json:"tags"` } type dockerCfg struct { Auths map[string]Credentials `json:"auths"` } // New returns a Registry struct parsing its URL and storing the required credentials func New(config v1.Secret) (*Registry, error) { // Parse secret cfg := dockerCfg{} err := json.Unmarshal(config.Data[".dockerconfigjson"], &cfg) if err != nil { return nil, err } regs := reflect.ValueOf(cfg.Auths).MapKeys() if len(regs) > 1 { return nil, fmt.Errorf("Found several registries: %q, unable to decide which one to use", regs) } registryURL := regs[0].String() re := regexp.MustCompile("(https?://.*)/(v[0-9]+)/?") parsedURL := re.FindStringSubmatch(registryURL) if len(parsedURL) == 0 { return nil, fmt.Errorf("Unable to parse registry URL %s", registryURL) } reg := Registry{ Endpoint: parsedURL[1], Version: parsedURL[2], Creds: cfg.Auths[registryURL], } return ®, err } // getTags return the list of tags from an HTTP response to the tag/list API endpoint func (r *Registry) getTags(body []byte) ([]string, error) { switch r.Version { case "v1": response := []tagv1{} err := json.Unmarshal(body, &response) if err != nil { return nil, err } tags := []string{} for _, tag := range response { tags = append(tags, tag.Name) } return tags, nil case "v2": response := tagListV2{} err := json.Unmarshal(body, &response) if err != nil { return nil, err } return 
response.Tags, nil default: return nil, fmt.Errorf("API version %s not supported", r.Version) } } // tagURL return the URL of the endpoint for listing existing tags func (r *Registry) tagURL(img string) (string, error) { switch r.Version { case "v1": return fmt.Sprintf("%s/%s/repositories/%s/tags", r.Endpoint, r.Version, img), nil case "v2": return fmt.Sprintf("%s/%s/%s/tags/list", r.Endpoint, r.Version, img), nil default: return "", fmt.Errorf("API version %s not supported", r.Version) } } // findProperty returns the value of a property from a list witht the format 'foo="bar",bar="foo"' func findProperty(src, property string) (string, error) { re := regexp.MustCompile(fmt.Sprintf("%s=\"([^\"]*)\"", property)) res := re.FindStringSubmatch(src) if len(res) != 2 { return "", fmt.Errorf("Unable to find the property %s in %s", property, src) } return res[1], nil } type authResponse struct { Token string `json:"token"` } // doRequestWithAuth does an HTTP GET agains the given url parsing the authInfo given func doRequestWithAuth(authInfo, url string, client *http.Client) ([]byte, error) { bearer, err := findProperty(authInfo, "Bearer realm") if err != nil { return nil, fmt.Errorf("Unable to extract auth info: %v", err) } service, err := findProperty(authInfo, "service") if err != nil { return nil, fmt.Errorf("Unable to extract auth info: %v", err) } scope, err := findProperty(authInfo, "scope") if err != nil { return nil, fmt.Errorf("Unable to extract auth info: %v", err) } authResp, err := client.Get(fmt.Sprintf("%s?service=%s&scope=%s", bearer, service, scope)) if err != nil { return nil, fmt.Errorf("Unable to obtain auth token: %v", err) } defer authResp.Body.Close() authb, err := ioutil.ReadAll(authResp.Body) if err != nil { return nil, err } authr := authResponse{} err = json.Unmarshal(authb, &authr) if err != nil { return nil, fmt.Errorf("Unable to parse auth token: %v", err) } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } 
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", authr.Token)) respWithAuth, err := client.Do(req) if err != nil { return nil, err } defer respWithAuth.Body.Close() body, err := ioutil.ReadAll(respWithAuth.Body) if err != nil { return nil, err } return body, nil } func (r *Registry) doRequest(url string) ([]byte, error) { tr := &http.Transport{ MaxIdleConns: 10, IdleConnTimeout: 30 * time.Second, DisableCompression: true, } client := &http.Client{ Transport: tr, } resp, err := client.Get(url) if err != nil { return nil, err } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } // Handle auth if needed if resp.StatusCode == 401 { // Get auth info from headers authInfo := resp.Header.Get("Www-Authenticate") if authInfo == "" { return nil, fmt.Errorf("Failed to authenticate: unknown authentication format: %v", body) } body, err = doRequestWithAuth(authInfo, url, client) if err != nil { return nil, err } } return body, nil } // ImageExists checks if a certain image:tag exists in the registry func (r *Registry) ImageExists(id, tag string) (bool, error) { url, err := r.tagURL(id) if err != nil { return false, err } body, err := r.doRequest(url) if err != nil { return false, err } if match, _ := regexp.MatchString("Resource not found", string(body)); match { // There is no image with that ID yet return false, nil } tags, err := r.getTags(body) if err != nil { return false, err } for _, t := range tags { if t == tag { return true, nil } } return false, nil } ================================================ FILE: pkg/registry/registry_test.go ================================================ package registry import ( "reflect" "testing" "k8s.io/api/core/v1" ) func TestNew(t *testing.T) { s := v1.Secret{ Data: map[string][]byte{ ".dockerconfigjson": []byte("{\"auths\":{\"https://index.docker.io/v1/\":{\"username\":\"test\",\"password\":\"pass\"}}}"), }, } r, err := New(s) if err != nil { t.Error(err) } if r.Endpoint != "https://index.docker.io" { 
t.Errorf("Unexpected endpoint %s, expecting https://index.docker.io", r.Endpoint) } if r.Version != "v1" { t.Errorf("Unexpected version %s, expecting v1", r.Version) } if r.Creds.Username != "test" { t.Errorf("Unexpected username %s, expecting test", r.Creds.Username) } if r.Creds.Password != "pass" { t.Errorf("Unexpected password %s, expecting pass", r.Creds.Password) } } func TestTagURLV1(t *testing.T) { r := Registry{ Endpoint: "https://registry-1.docker.io", Version: "v1", } url, err := r.tagURL("test/image") if err != nil { t.Errorf("Unexpected error: %v", err) } if url != "https://registry-1.docker.io/v1/repositories/test/image/tags" { t.Errorf("Unexpected URL %s", url) } } func TestTagURLV2(t *testing.T) { r := Registry{ Endpoint: "https://registry-1.docker.io", Version: "v2", } url, err := r.tagURL("test/image") if err != nil { t.Errorf("Unexpected error: %v", err) } if url != "https://registry-1.docker.io/v2/test/image/tags/list" { t.Errorf("Unexpected URL %s", url) } } func TestGetTagsV1(t *testing.T) { r := Registry{ Endpoint: "https://registry-1.docker.io", Version: "v1", } body := []byte("[{\"later\": \"\", \"name\": \"latest\"}]") tags, err := r.getTags(body) if err != nil { t.Errorf("Unexpected error: %v", err) } expectedTags := []string{"latest"} if !reflect.DeepEqual(tags, expectedTags) { t.Errorf("Unexpected tags: %v", tags) } } func TestGetTagsV2(t *testing.T) { r := Registry{ Endpoint: "https://registry-1.docker.io", Version: "v2", } body := []byte("{\"name\": \"test\", \"tags\":[\"latest\"]}") tags, err := r.getTags(body) if err != nil { t.Errorf("Unexpected error: %v", err) } expectedTags := []string{"latest"} if !reflect.DeepEqual(tags, expectedTags) { t.Errorf("Unexpected tags: %v", tags) } } ================================================ FILE: pkg/utils/configlocation.go ================================================ package utils // ConfigLocation is a struct to store the location of kubeless configuration specific ConfigMap type 
ConfigLocation struct {
	Name      string // name of the ConfigMap
	Namespace string // namespace holding the ConfigMap
}

================================================
FILE: pkg/utils/exec.go
================================================
package utils

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"sync"

	"github.com/sirupsen/logrus"
	"golang.org/x/net/websocket"
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
)

// Channel identifiers of the "channel.k8s.io" protocol: the first byte of
// every websocket message selects the stream the payload belongs to.
const (
	stdinChannel  = 0
	stdoutChannel = 1
	stderrChannel = 2
	errChannel    = 3
)

// Cmd stores information relevant to an individual remote command being run
type Cmd struct {
	Stdin  io.Reader // if nil, nothing is sent on the stdin channel
	Stdout io.Writer // may be nil; stdout messages are then dropped
	Stderr io.Writer // may be nil; stderr messages are then dropped
}

// RoundTripCallback is suitable to use with `ExecRoundTripper` and will
// copy data to/from stdio channels. The returned `Response` is
// currently always `nil`.
//
// Two goroutines run until completion: one pumps c.Stdin into the websocket
// (prefixing each message with the stdin channel byte), the other demuxes
// incoming messages onto c.Stdout/c.Stderr by their leading channel byte.
// The first error (if any) sent on errChan is returned to the caller.
func (c *Cmd) RoundTripCallback(conn *websocket.Conn) (*http.Response, error) {
	// Capacity 3 so writers never block even if both goroutines and the
	// remote error path all report.
	errChan := make(chan error, 3)
	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		defer wg.Done()
		if c.Stdin == nil {
			return
		}
		buf := make([]byte, 1025)
		// NB: first byte is fixed; the remaining 1024 bytes carry the payload.
		buf[0] = stdinChannel
		for {
			n, err := c.Stdin.Read(buf[1:])
			// Send whatever was read (possibly 0 bytes) before handling err,
			// so a final short read is not lost.
			err2 := websocket.Message.Send(conn, buf[:n+1])
			if err == nil && err2 != nil {
				err = err2
			}
			if err == io.EOF {
				break
			} else if err != nil {
				errChan <- err
				return
			}
		}
		const closeStatusNormal = 1000
		conn.WriteClose(closeStatusNormal)
	}()
	go func() {
		defer wg.Done()
		for {
			var buf []byte
			err := websocket.Message.Receive(conn, &buf)
			if err == io.EOF {
				break
			} else if err != nil {
				errChan <- err
				return
			}
			if len(buf) == 0 {
				logrus.Debug("Received empty message, skipping")
				continue
			}
			logrus.Debugf("Received %dB message for channel %d", len(buf)-1, buf[0])
			var w io.Writer
			switch buf[0] {
			case stdoutChannel:
				w = c.Stdout
			case stderrChannel:
				w = c.Stderr
			case errChannel:
				// The remote side reported a command failure; surface it and stop.
				errChan <- fmt.Errorf("Error from remote command: %s", buf[1:])
				return
			default:
				logrus.Infof("Ignoring message for unknown channel %d", buf[0])
				continue
			}
			if w == nil {
				logrus.Infof("Ignoring message for nil channel %d", buf[0])
				continue
			}
			_, err = w.Write(buf[1:])
			if err != nil {
				errChan <- err
				return
			}
		}
	}()
	wg.Wait()
	close(errChan)
	// Receives nil when errChan was closed without any error being sent.
	err := <-errChan
	return &http.Response{
		Status:     "OK",
		StatusCode: 200,
	}, err
}

// A RoundTripCallback is used to process the websocket from an
// individual command execution.
type RoundTripCallback func(conn *websocket.Conn) (*http.Response, error)

// WebsocketRoundTripper is an http.RoundTripper that invokes a
// callback on a websocket connection.
type WebsocketRoundTripper struct {
	TLSConfig *tls.Config
	Do        RoundTripCallback
}

// RoundTrip implements the http.RoundTripper interface.
// It dials the request URL as a "channel.k8s.io" websocket (binary frames)
// and hands the connection to the configured callback.
func (d *WebsocketRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	referrer := r.Referer()
	if referrer == "" {
		// websocket.NewConfig requires a non-empty origin.
		referrer = "http://localhost/"
	}
	wsconf, err := websocket.NewConfig(r.URL.String(), referrer)
	if err != nil {
		return nil, err
	}
	wsconf.TlsConfig = d.TLSConfig
	wsconf.Header = r.Header
	wsconf.Protocol = []string{"channel.k8s.io"}
	conn, err := websocket.DialConfig(wsconf)
	if err != nil {
		return nil, err
	}
	conn.PayloadType = websocket.BinaryFrame
	defer conn.Close()
	return d.Do(conn)
}

// ExecRoundTripper creates a wrapped WebsocketRoundTripper
// carrying the TLS and auth settings from the given rest.Config.
func ExecRoundTripper(conf *rest.Config, f RoundTripCallback) (http.RoundTripper, error) {
	tlsConfig, err := rest.TLSConfigFor(conf)
	if err != nil {
		return nil, err
	}
	rt := &WebsocketRoundTripper{
		Do:        f,
		TLSConfig: tlsConfig,
	}
	return rest.HTTPWrappersForConfig(conf, rt)
}

// Exec returns an "exec" Request suitable for ExecRoundTripper.
// It builds the pods/{pod}/exec subresource URL and rewrites the scheme to
// ws/wss so it can be dialed as a websocket.
func Exec(client corev1.CoreV1Interface, pod, namespace string, opts v1.PodExecOptions) (*http.Request, error) {
	cl := client.RESTClient()
	// The verb is irrelevant here; only the generated URL is used.
	req := cl.Verb("ignored").
		Namespace(namespace).
		Resource("pods").
		Name(pod).
		SubResource("exec").
		VersionedParams(&opts, scheme.ParameterCodec)
	url := req.URL()
	switch url.Scheme {
	case "http":
		url.Scheme = "ws"
	case "https":
		url.Scheme = "wss"
	default:
		return nil, fmt.Errorf("Unrecognised URL scheme in %v", url)
	}
	// NB: Only some fields are honoured by our RoundTrip implementation
	return &http.Request{
		URL: url,
	}, nil
}

================================================
FILE: pkg/utils/exec_test.go
================================================
package utils

import (
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// TestExecURL checks that Exec produces the wss URL for the pod exec
// subresource, with the PodExecOptions encoded as query parameters.
func TestExecURL(t *testing.T) {
	conf := rest.Config{
		Host: "https://example.com/",
	}
	clientset := kubernetes.NewForConfigOrDie(&conf)
	opts := v1.PodExecOptions{
		Container: "ctr",
		Stderr:    true,
		Command:   []string{"a", "b"},
	}
	req, err := Exec(clientset.Core(), "mypod", "myns", opts)
	if err != nil {
		t.Fatal("Exec error:", err)
	}
	t.Logf("Got URL %v", req.URL)
	if req.URL.String() != "wss://example.com/api/v1/namespaces/myns/pods/mypod/exec?command=a&command=b&container=ctr&stderr=true" {
		t.Error("Unexpected url:", req.URL)
	}
}

================================================
FILE: pkg/utils/k8sutil.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/url"
	"os"
	"path/filepath"

	kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1"
	"github.com/sirupsen/logrus"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/api/core/v1"
	clientsetAPIExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	monitoringv1alpha1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"

	// Auth plugins
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"github.com/imdario/mergo"
	"github.com/kubeless/kubeless/pkg/client/clientset/versioned"
)

const (
	defaultTimeout = "180"
)

// GetClient returns a k8s clientset to the request from inside of cluster
// It aborts the process via logrus.Fatalf on any configuration/client error.
func GetClient() kubernetes.Interface {
	config, err := GetInClusterConfig()
	if err != nil {
		logrus.Fatalf("Can not get kubernetes config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		logrus.Fatalf("Can not create kubernetes client: %v", err)
	}
	return clientset
}

// BuildOutOfClusterConfig returns k8s config
// Kubeconfig resolution order: $KUBECONFIG if set, otherwise
// $HOMEDRIVE+$HOMEPATH (Windows), then $HOME / $USERPROFILE, joined with
// ".kube/config".
func BuildOutOfClusterConfig() (*rest.Config, error) {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	kubeconfigEnv := os.Getenv("KUBECONFIG")
	if kubeconfigEnv == "" {
		home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
		if home == "" {
			for _, h := range []string{"HOME", "USERPROFILE"} {
				if home = os.Getenv(h); home != "" {
					break
				}
			}
		}
		kubeconfigPath := filepath.Join(home, ".kube", "config")
		loadingRules.ExplicitPath = kubeconfigPath
	}
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		loadingRules,
		&clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		return nil, err
	}
	return config, nil
}

// GetClientOutOfCluster returns a k8s clientset to the request from outside of cluster
// It aborts the process via logrus.Fatalf on any configuration/client error.
func GetClientOutOfCluster() kubernetes.Interface {
	config, err := BuildOutOfClusterConfig()
	if err != nil {
		logrus.Fatalf("Can not get kubernetes config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		logrus.Fatalf("Can not get kubernetes client: %v", err)
	}
	return clientset
}

// GetAPIExtensionsClientOutOfCluster returns a k8s clientset to access APIExtensions from outside of cluster
func GetAPIExtensionsClientOutOfCluster() clientsetAPIExtensions.Interface {
	config, err := BuildOutOfClusterConfig()
	if err != nil {
		logrus.Fatalf("Can not get kubernetes config: %v", err)
	}
	clientset, err := clientsetAPIExtensions.NewForConfig(config)
	if err != nil {
		logrus.Fatalf("Can not get kubernetes client: %v", err)
	}
	return clientset
}

// GetAPIExtensionsClientInCluster returns a k8s clientset to access APIExtensions from inside of cluster
func GetAPIExtensionsClientInCluster() clientsetAPIExtensions.Interface {
	config, err := GetInClusterConfig()
	if err != nil {
		logrus.Fatalf("Can not get kubernetes config: %v", err)
	}
	clientset, err := clientsetAPIExtensions.NewForConfig(config)
	if err != nil {
		logrus.Fatalf("Can not get kubernetes client: %v", err)
	}
	return clientset
}

// GetFunctionClientInCluster returns function clientset to the request from inside of cluster
func GetFunctionClientInCluster() (versioned.Interface, error) {
	config, err := GetInClusterConfig()
	if err != nil {
		return nil, err
	}
	kubelessClient, err := versioned.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return kubelessClient, nil
}

// GetKubelessClientOutCluster returns kubeless clientset to make kubeless API request from outside of cluster
func GetKubelessClientOutCluster() (versioned.Interface, error) {
	config, err := BuildOutOfClusterConfig()
	if err != nil {
		return nil, err
	}
	kubelessClient, err := versioned.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return kubelessClient, nil
}

// GetDefaultNamespace returns the namespace set in current cluster context
// Falls back to v1.NamespaceDefault when the context namespace cannot be read.
func GetDefaultNamespace() string {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	rules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}
	if ns, _, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).Namespace(); err == nil {
		return ns
	}
	return v1.NamespaceDefault
}

// GetFunction returns specification of a function
// NB: aborts the process via logrus.Fatalf when the function does not exist.
func GetFunction(funcName, ns string) (kubelessApi.Function, error) {
	kubelessClient, err := GetKubelessClientOutCluster()
	if err != nil {
		return kubelessApi.Function{}, err
	}
	f, err := kubelessClient.KubelessV1beta1().Functions(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		if k8sErrors.IsNotFound(err) {
			logrus.Fatalf("Function %s is not found", funcName)
		}
		return kubelessApi.Function{}, err
	}
	return *f, nil
}

// CreateFunctionCustomResource will create a custom function object
func CreateFunctionCustomResource(kubelessClient versioned.Interface, f *kubelessApi.Function) error {
	_, err := kubelessClient.KubelessV1beta1().Functions(f.Namespace).Create(f)
	if err != nil {
		return err
	}
	return nil
}

// UpdateFunctionCustomResource applies changes to the function custom object
func UpdateFunctionCustomResource(kubelessClient versioned.Interface, f *kubelessApi.Function) error {
	_, err := kubelessClient.KubelessV1beta1().Functions(f.Namespace).Update(f)
	return err
}

// PatchFunctionCustomResource applies changes to the function custom object
// using a JSON merge patch of the full serialized function.
func PatchFunctionCustomResource(kubelessClient versioned.Interface, f *kubelessApi.Function) error {
	data, err := json.Marshal(f)
	if err != nil {
		return err
	}
	_, err = kubelessClient.KubelessV1beta1().Functions(f.Namespace).Patch(f.Name, types.MergePatchType, data)
	return err
}

// DeleteFunctionCustomResource will delete custom function object
func DeleteFunctionCustomResource(kubelessClient versioned.Interface, funcName, ns string) error {
	err := kubelessClient.KubelessV1beta1().Functions(ns).Delete(funcName, &metav1.DeleteOptions{})
	if err != nil {
		return err
	}
	return nil
}

// GetFunctionCustomResource returns the custom function object
// (comment previously said "will delete" — copy-paste from the sibling above).
func GetFunctionCustomResource(kubelessClient versioned.Interface, funcName, ns string) (*kubelessApi.Function, error) {
	functionObj, err := kubelessClient.KubelessV1beta1().Functions(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return functionObj, nil
}

// GetPodsByLabel returns list of pods which match the label
// We use this to returns pods to which the function is deployed or pods running controllers
func GetPodsByLabel(c kubernetes.Interface, ns, k, v string) (*v1.PodList, error) {
	pods, err := c.Core().Pods(ns).List(metav1.ListOptions{
		LabelSelector: k + "=" + v,
	})
	if err != nil {
		return nil, err
	}
	return pods, nil
}

// GetReadyPod returns the first pod has passed the liveness probe check
// (i.e. the first pod whose containers all report Ready).
func GetReadyPod(pods *v1.PodList) (v1.Pod, error) {
	for _, pod := range pods.Items {
		isPodRunning := true
		for _, containerStatus := range pod.Status.ContainerStatuses {
			if !containerStatus.Ready {
				isPodRunning = false
				break
			}
		}
		if isPodRunning {
			return pod, nil
		}
	}
	return v1.Pod{}, fmt.Errorf("there is no pod ready")
}

// GetLocalHostname returns hostname
// of the form <funcName>.<apiserver-host>.nip.io, useful for local ingress.
func GetLocalHostname(config *rest.Config, funcName string) (string, error) {
	url, err := url.Parse(config.Host)
	if err != nil {
		return "", err
	}
	host := url.Hostname()
	return fmt.Sprintf("%s.%s.nip.io", funcName, host), nil
}

// doRESTReq performs a raw REST request (get/create/update) against
// apis/<groupVersion>/namespaces/<namespace>/<resource>, optionally
// marshalling body and unmarshalling the response into result.
func doRESTReq(restIface rest.Interface, groupVersion, verb, resource, elem, namespace string, body interface{}, result interface{}) error {
	var req *rest.Request
	bodyJSON := []byte{}
	var err error
	if body != nil {
		bodyJSON, err = json.Marshal(body)
		if err != nil {
			return err
		}
	}
	switch verb {
	case "get":
		req = restIface.Get().Name(elem)
		break
	case "create":
		req = restIface.Post().Body(bodyJSON)
		break
	case "update":
		req = restIface.Put().Name(elem).Body(bodyJSON)
		break
	default:
return fmt.Errorf("Verb %s not supported", verb)
	}
	rawResponse, err := req.AbsPath("apis", groupVersion, "namespaces", namespace, resource).DoRaw()
	if err != nil {
		return err
	}
	if result != nil {
		err = json.Unmarshal(rawResponse, result)
		if err != nil {
			return err
		}
	}
	return nil
}

// CreateAutoscale creates HPA object for function
func CreateAutoscale(client kubernetes.Interface, hpa v2beta1.HorizontalPodAutoscaler) error {
	_, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(hpa.ObjectMeta.Namespace).Create(&hpa)
	return err
}

// UpdateAutoscale updates an existing HPA object for a function
func UpdateAutoscale(client kubernetes.Interface, hpa v2beta1.HorizontalPodAutoscaler) error {
	_, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(hpa.ObjectMeta.Namespace).Update(&hpa)
	return err
}

// DeleteAutoscale deletes an autoscale rule
// A NotFound error is treated as success (idempotent delete).
func DeleteAutoscale(client kubernetes.Interface, name, ns string) error {
	err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Delete(name, &metav1.DeleteOptions{})
	if err != nil && !k8sErrors.IsNotFound(err) {
		return err
	}
	return nil
}

// DeleteServiceMonitor cleans the sm if it exists
// A NotFound error is treated as success (idempotent delete).
func DeleteServiceMonitor(smclient monitoringv1alpha1.MonitoringV1alpha1Client, name, ns string) error {
	err := smclient.ServiceMonitors(ns).Delete(name, &metav1.DeleteOptions{})
	if err != nil && !k8sErrors.IsNotFound(err) {
		return err
	}
	return nil
}

// InitializeEmptyMapsInDeployment initializes all nil maps in a Deployment object
// This is done to counteract with side-effects of github.com/imdario/mergo which panics when provided with a nil map in a struct
func initializeEmptyMapsInDeployment(deployment *appsv1.Deployment) {
	if deployment.ObjectMeta.Annotations == nil {
		deployment.Annotations = make(map[string]string)
	}
	if deployment.ObjectMeta.Labels == nil {
		deployment.ObjectMeta.Labels = make(map[string]string)
	}
	if deployment.Spec.Selector != nil && deployment.Spec.Selector.MatchLabels == nil {
		// FIX: this branch previously re-assigned ObjectMeta.Labels (already
		// handled above) instead of the MatchLabels map the condition checks,
		// leaving Selector.MatchLabels nil.
		deployment.Spec.Selector.MatchLabels = make(map[string]string)
	}
	if deployment.Spec.Template.ObjectMeta.Annotations == nil {
		deployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
	}
	if deployment.Spec.Template.ObjectMeta.Labels == nil {
		deployment.Spec.Template.ObjectMeta.Labels = make(map[string]string)
	}
	if deployment.Spec.Template.Spec.NodeSelector == nil {
		deployment.Spec.Template.Spec.NodeSelector = make(map[string]string)
	}
}

// MergeDeployments merges two deployment objects
// (maps/scalars via mergo, then containers pairwise by index with
// mergo.WithAppendSlice so volumeMount/env/port lists are appended).
func MergeDeployments(destinationDeployment *appsv1.Deployment, sourceDeployment *appsv1.Deployment) error {
	// Initializing nil maps in deployment objects else github.com/imdario/mergo panics
	initializeEmptyMapsInDeployment(destinationDeployment)
	initializeEmptyMapsInDeployment(sourceDeployment)
	err := mergo.Merge(destinationDeployment, sourceDeployment)
	// Merge containers
	if err == nil && len(sourceDeployment.Spec.Template.Spec.Containers) > 0 {
		srcContainers := sourceDeployment.Spec.Template.Spec.Containers
		dstContainers := destinationDeployment.Spec.Template.Spec.Containers
		// Merge each container individually
		for i, srcContainer := range srcContainers {
			if i >= len(dstContainers) {
				// FIX: the destination slice is shorter than the source one here;
				// indexing Containers[i] (as the code previously did) would panic
				// with index out of range — append the extra container instead.
				destinationDeployment.Spec.Template.Spec.Containers = append(
					destinationDeployment.Spec.Template.Spec.Containers, srcContainer)
				continue
			}
			dstContainer := dstContainers[i]
			// Use mergo.WithAppendSlice to append extra volumeMount/env/port definitions
			err = mergo.Merge(&dstContainer, srcContainer, mergo.WithAppendSlice)
			if err != nil {
				break
			}
			destinationDeployment.Spec.Template.Spec.Containers[i] = dstContainer
		}
	}
	return err
}

// FunctionObjAddFinalizer add specified finalizer string to function object
// (operates on a deep copy and pushes it with an Update call).
func FunctionObjAddFinalizer(kubelessClient versioned.Interface, funcObj *kubelessApi.Function, finalizerString string) error {
	funcObjClone := funcObj.DeepCopy()
	funcObjClone.ObjectMeta.Finalizers = append(funcObjClone.ObjectMeta.Finalizers, finalizerString)
	return UpdateFunctionCustomResource(kubelessClient, funcObjClone)
}

// FunctionObjHasFinalizer
// checks if function object already has the Function controller finalizer
func FunctionObjHasFinalizer(funcObj *kubelessApi.Function, finalizerString string) bool {
	currentFinalizers := funcObj.ObjectMeta.Finalizers
	for _, f := range currentFinalizers {
		if f == finalizerString {
			return true
		}
	}
	return false
}

// FunctionObjRemoveFinalizer removes the finalizer from the function object
// The finalizer list is rebuilt without the given string; an empty result is
// set to nil before the Update call.
func FunctionObjRemoveFinalizer(kubelessClient versioned.Interface, funcObj *kubelessApi.Function, finalizerString string) error {
	funcObjClone := funcObj.DeepCopy()
	newSlice := make([]string, 0)
	for _, item := range funcObj.ObjectMeta.Finalizers {
		if item == finalizerString {
			continue
		}
		newSlice = append(newSlice, item)
	}
	if len(newSlice) == 0 {
		newSlice = nil
	}
	funcObjClone.ObjectMeta.Finalizers = newSlice
	err := UpdateFunctionCustomResource(kubelessClient, funcObjClone)
	return err
}

// GetAnnotationsFromCRD gets annotations from a CustomResourceDefinition
func GetAnnotationsFromCRD(clientset clientsetAPIExtensions.Interface, name string) (map[string]string, error) {
	crd, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return crd.GetAnnotations(), nil
}

// GetRandString returns a random string of length N
// (crypto/rand bytes, base64 raw-URL encoded — output is longer than n bytes).
func GetRandString(n int) (string, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(b), nil
}

// GetSecretsAsLocalObjectReference returns a list of LocalObjectReference based on secret names
// Empty strings are skipped.
func GetSecretsAsLocalObjectReference(secrets ...string) []v1.LocalObjectReference {
	res := []v1.LocalObjectReference{}
	for _, secret := range secrets {
		if secret != "" {
			res = append(res, v1.LocalObjectReference{Name: secret})
		}
	}
	return res
}

================================================
FILE: pkg/utils/k8sutil_test.go
================================================
package utils

import (
	"bytes"
	"encoding/json"
	"io"
	"io/ioutil"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	v2beta1 "k8s.io/api/autoscaling/v2beta1"
	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	fakeextensionsapi "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
	resource "k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ktesting "k8s.io/client-go/testing"
)

// objBody serializes an object to JSON and wraps it as a ReadCloser,
// suitable as a fake HTTP response body.
func objBody(object interface{}) io.ReadCloser {
	output, err := json.Marshal(object)
	if err != nil {
		panic(err)
	}
	return ioutil.NopCloser(bytes.NewReader([]byte(output)))
}

// fakeConfig returns a minimal rest.Config pointing at a fake apiserver.
func fakeConfig() *rest.Config {
	return &rest.Config{
		Host: "https://example.com:443",
		ContentConfig: rest.ContentConfig{
			GroupVersion: &schema.GroupVersion{
				Group:   "",
				Version: "v1",
			},
			NegotiatedSerializer: scheme.Codecs,
		},
	}
}

// TestGetLocalHostname checks the <func>.<host>.nip.io hostname construction.
func TestGetLocalHostname(t *testing.T) {
	config := fakeConfig()
	expectedHostName := "foobar.example.com.nip.io"
	actualHostName, err := GetLocalHostname(config, "foobar")
	if err != nil {
		t.Error(err)
	}
	if expectedHostName != actualHostName {
		t.Errorf("Expected %s but got %s", expectedHostName, actualHostName)
	}
}

// TestCreateAutoscaleResource checks that CreateAutoscale creates the HPA
// in the requested namespace with the requested name.
func TestCreateAutoscaleResource(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	name := "foo"
	ns := "myns"
	hpaDef := v2beta1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
	}
	if err := CreateAutoscale(clientset, hpaDef); err != nil {
		t.Fatalf("Creating autoscale returned err: %v", err)
	}
	hpa, err := clientset.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Creating autoscale returned err: %v", err)
	}
	if hpa.ObjectMeta.Name != "foo" {
		t.Fatalf("Creating wrong scale target name")
	}
}

// TestUpdateAutoscaleResource checks that UpdateAutoscale replaces an
// existing HPA in place.
func TestUpdateAutoscaleResource(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	name := "foo"
	ns := "myns"
	// Create a pre-existing HPA
	hpaDef := v2beta1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
	}
	if err := CreateAutoscale(clientset, hpaDef); err != nil {
		t.Fatalf("Creating autoscale returned err: %v", err)
	}
	// Perform an update
	hpaDef = v2beta1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
			Labels: map[string]string{
				"baz": "qux",
			},
		},
	}
	if err := UpdateAutoscale(clientset, hpaDef); err != nil {
		t.Fatalf("Updating autoscale returned err: %v", err)
	}
	hpa, err := clientset.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Updating autoscale returned err: %v", err)
	}
	if hpa.ObjectMeta.Name != "foo" {
		t.Fatalf("Updating wrong scale target name")
	}
}

// TestDeleteAutoscaleResource checks that DeleteAutoscale issues the delete
// against the right namespace and name.
func TestDeleteAutoscaleResource(t *testing.T) {
	myNsFoo := metav1.ObjectMeta{
		Namespace: "myns",
		Name:      "foo",
	}
	as := v2beta1.HorizontalPodAutoscaler{
		ObjectMeta: myNsFoo,
	}
	clientset := fake.NewSimpleClientset(&as)
	if err := DeleteAutoscale(clientset, "foo", "myns"); err != nil {
		t.Fatalf("Deleting autoscale returned err: %v", err)
	}
	a := clientset.Actions()
	if ns := a[0].GetNamespace(); ns != "myns" {
		t.Errorf("deleted autoscale from wrong namespace (%s)", ns)
	}
	if name := a[0].(ktesting.DeleteAction).GetName(); name != "foo" {
		t.Errorf("deleted autoscale with wrong name (%s)", name)
	}
}

// TestInitializeEmptyMapsInDeployment checks that every nil map that
// initializeEmptyMapsInDeployment is responsible for is allocated.
func TestInitializeEmptyMapsInDeployment(t *testing.T) {
	deployment := appsv1.Deployment{}
	deployment.Spec.Selector = &metav1.LabelSelector{}
	initializeEmptyMapsInDeployment(&deployment)
	if deployment.ObjectMeta.Annotations == nil {
		t.Fatal("ObjectMeta.Annotations map is nil")
	}
	if deployment.ObjectMeta.Labels == nil {
		t.Fatal("ObjectMeta.Labels map is nil")
	}
	// NOTE(review): this condition can never fire when Selector is non-nil
	// (short-circuit makes it false) and would nil-dereference if Selector
	// were nil; presumably "Selector != nil && MatchLabels == nil" was
	// intended — confirm before enabling, since the helper under test does
	// not currently initialize MatchLabels.
	if deployment.Spec.Selector == nil && deployment.Spec.Selector.MatchLabels == nil {
		t.Fatal("deployment.Spec.Selector.MatchLabels is nil")
	}
	if deployment.Spec.Template.ObjectMeta.Labels == nil {
		t.Fatal("deployment.Spec.Template.ObjectMeta.Labels map is nil")
	}
} if deployment.Spec.Template.ObjectMeta.Annotations == nil { t.Fatal("deployment.Spec.Template.ObjectMeta.Annotations map is nil") } if deployment.Spec.Template.Spec.NodeSelector == nil { t.Fatal("deployment.Spec.Template.Spec.NodeSelector map is nil") } } func TestMergeDeployments(t *testing.T) { var dstReplicas int32 dstReplicas = 10 destinationDeployment := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "foo1-deploy": "bar", }, }, Spec: appsv1.DeploymentSpec{ Replicas: &dstReplicas, Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ { VolumeMounts: []corev1.VolumeMount{ { Name: "foo", MountPath: "/bar", }, }, Resources: corev1.ResourceRequirements{}, }, }, }, }, }, } var srcReplicas int32 srcReplicas = 8 sourceDeployment := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "foo2-deploy": "bar", }, }, Spec: appsv1.DeploymentSpec{ Replicas: &srcReplicas, Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ { VolumeMounts: []corev1.VolumeMount{ { Name: "baz", MountPath: "/qux", }, }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("100m"), corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"), }, }, }, }, }, }, }, } var expectedReplicas int32 expectedReplicas = 10 expectedDeployment := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "foo1-deploy": "bar", "foo2-deploy": "bar", }, }, Spec: appsv1.DeploymentSpec{ Replicas: &expectedReplicas, Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ { VolumeMounts: []corev1.VolumeMount{ { Name: "foo", MountPath: "/bar", }, { Name: "baz", MountPath: "/qux", }, }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("100m"), 
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"),
								},
							},
						},
					},
				},
			},
		},
	}
	MergeDeployments(&destinationDeployment, &sourceDeployment)
	// The source container must be merged into (not appended to) the
	// destination's single container.
	mergedContainerCount := len(destinationDeployment.Spec.Template.Spec.Containers)
	if mergedContainerCount != 1 {
		t.Fatalf("Expecting 1 container but received %v", mergedContainerCount)
	}
	expectedAnnotations := expectedDeployment.ObjectMeta.Annotations
	mergedAnnotations := destinationDeployment.ObjectMeta.Annotations
	for i := range expectedAnnotations {
		if mergedAnnotations[i] != expectedAnnotations[i] {
			t.Fatalf("Expecting annotation %s but received %s", expectedAnnotations[i], mergedAnnotations[i])
		}
	}
	mergedReplicas := *destinationDeployment.Spec.Replicas
	// BUGFIX: the original failure message said "Expecting 8 replicas" even
	// though the expected (destination) replica count is 10; report the real
	// expected value instead of a hard-coded wrong one.
	if mergedReplicas != expectedReplicas {
		t.Fatalf("Expecting %v replicas but received %v", expectedReplicas, mergedReplicas)
	}
	expectedVolumeMountCount := 2
	mergedVolumeMountCount := len(destinationDeployment.Spec.Template.Spec.Containers[0].VolumeMounts)
	if mergedVolumeMountCount != expectedVolumeMountCount {
		t.Fatalf("Expecting %v volumeMounts but received %v", expectedVolumeMountCount, mergedVolumeMountCount)
	}
	expectedCPURequest := expectedDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceCPU)]
	mergedCPURequest := destinationDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceCPU)]
	if mergedCPURequest != expectedCPURequest {
		t.Fatalf(
			"Expecting %s cpu resource request but received %s",
			expectedCPURequest.String(),
			mergedCPURequest.String(),
		)
	}
	expectedMemoryRequest := expectedDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceMemory)]
	mergedMemoryRequest := destinationDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceMemory)]
	if mergedMemoryRequest != expectedMemoryRequest {
		t.Fatalf(
			"Expecting %s memory resource request but received %s",
expectedMemoryRequest.String(),
			mergedMemoryRequest.String(),
		)
	}
}

// TestGetAnnotationsFromCRD verifies that GetAnnotationsFromCRD returns the
// annotations stored on a CustomResourceDefinition, and an empty result when
// the CRD carries no annotations.
func TestGetAnnotationsFromCRD(t *testing.T) {
	crdWithoutAnnotationName := "crdWithoutAnnotation"
	crdWithAnnotationName := "crdWithAnnotation"
	expectedAnnotations := map[string]string{
		"foo": "bar",
	}
	// CRD that carries a single "foo: bar" annotation.
	crdWithAnnotation := &extensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				"foo": "bar",
			},
			Name: crdWithAnnotationName,
		},
		Spec: extensionsv1beta1.CustomResourceDefinitionSpec{
			Group: "foo.group.io",
			Names: extensionsv1beta1.CustomResourceDefinitionNames{
				Plural:   "foos",
				Singular: "foo",
				Kind:     "fooKind",
				ListKind: "fooList",
			},
		},
	}
	clientset := fakeextensionsapi.NewSimpleClientset()
	_, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crdWithAnnotation)
	if err != nil {
		t.Fatalf("Error while creating CRD: %v", err)
	}
	annotations, err := GetAnnotationsFromCRD(clientset, crdWithAnnotationName)
	if err != nil {
		t.Fatalf("Error while fetching CRD: %v", err)
	}
	for i := range expectedAnnotations {
		if annotations[i] != expectedAnnotations[i] {
			t.Errorf("Expecting annotation %s but received %s", expectedAnnotations[i], annotations[i])
		}
	}
	// Same CRD shape but with no annotations: the lookup must still succeed
	// and yield an empty annotation map.
	crdWithoutAnnotation := &extensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{},
			Name:        crdWithoutAnnotationName,
		},
		Spec: extensionsv1beta1.CustomResourceDefinitionSpec{
			Group: "foo.group.io",
			Names: extensionsv1beta1.CustomResourceDefinitionNames{
				Plural:   "foos",
				Singular: "foo",
				Kind:     "fooKind",
				ListKind: "fooList",
			},
		},
	}
	_, err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crdWithoutAnnotation)
	if err != nil {
		t.Fatalf("Error while creating CRD: %v", err)
	}
	annotations, err = GetAnnotationsFromCRD(clientset, crdWithoutAnnotationName)
	if err != nil {
		t.Fatalf("Error while fetching annotations from CRD: %v", err)
	}
	if len(annotations) != 0 {
		t.Errorf("Expecting annotations of length 0 but received length %d",
len(annotations)) } } ================================================ FILE: pkg/utils/kubelessutil.go ================================================ /* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package utils import ( "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path" "path/filepath" "strconv" "strings" "unicode/utf8" monitoringv1alpha1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1" "github.com/ghodss/yaml" kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1" "github.com/kubeless/kubeless/pkg/langruntime" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" clientsetAPIExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) // secretsMountPath is the file system path where volumes populated with secrets are mounted. 
const secretsMountPath = "/var/run/secrets/kubeless.io" // GetFunctionPort returns the port for a function service func GetFunctionPort(clientset kubernetes.Interface, namespace, functionName string) (string, error) { svc, err := clientset.CoreV1().Services(namespace).Get(functionName, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("Unable to find the service for function %s", functionName) } return strconv.Itoa(int(svc.Spec.Ports[0].Port)), nil } // IsJSON returns true if the string is json func IsJSON(s string) bool { var js map[string]interface{} return json.Unmarshal([]byte(s), &js) == nil } func appendToCommand(orig string, command ...string) string { if len(orig) > 0 { return fmt.Sprintf("%s && %s", orig, strings.Join(command, " && ")) } return strings.Join(command, " && ") } func getProvisionContainer(function, checksum, fileName, handler, contentType, runtime, prepareImage string, runtimeVolume, depsVolume v1.VolumeMount, resources v1.ResourceRequirements, lr *langruntime.Langruntimes) (v1.Container, error) { prepareCommand := "" originFile := path.Join(depsVolume.MountPath, fileName) // Prepare Function file and dependencies if strings.Contains(contentType, "base64") { // File is encoded in base64 decodedFile := "/tmp/func.decoded" prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("base64 -d < %s > %s", originFile, decodedFile)) originFile = decodedFile } else if strings.Contains(contentType, "url") { fromURLFile := "/tmp/func.fromurl" prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("curl '%s' -L --silent --output %s", function, fromURLFile)) originFile = fromURLFile } else if strings.Contains(contentType, "text") || contentType == "" { // Assumming that function is plain text // So we don't need to preprocess it } else { return v1.Container{}, fmt.Errorf("Unable to prepare function of type %s: Unknown format", contentType) } // Validate checksum if checksum == "" { // DEPRECATED: Checksum may be empty } else { 
checksumInfo := strings.Split(checksum, ":") switch checksumInfo[0] { case "sha256": shaFile := "/tmp/func.sha256" prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("echo '%s %s' > %s", checksumInfo[1], originFile, shaFile), fmt.Sprintf("sha256sum -c %s", shaFile), ) break default: return v1.Container{}, fmt.Errorf("Unable to verify checksum %s: Unknown format", checksum) } } if strings.Contains(contentType, "zip") { // Extract content in case it is a Zip file prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("unzip -o %s -d %s", originFile, runtimeVolume.MountPath), ) } else if strings.Contains(contentType, "compressedtar") { // Extract content in case it is a compressed tar file. // The `tar` command auto-detects the compression type. prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("tar xf %s -C %s", originFile, runtimeVolume.MountPath), ) } else { // Copy the target as a single file destFileName, err := getFileName(handler, contentType, runtime, lr) if err != nil { return v1.Container{}, err } dest := path.Join(runtimeVolume.MountPath, destFileName) prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("cp %s %s", originFile, dest), ) } // Copy deps file to the installation path runtimeInf, err := lr.GetRuntimeInfo(runtime) if err == nil && runtimeInf.DepName != "" && !strings.Contains(contentType, "deps") { depsFile := path.Join(depsVolume.MountPath, runtimeInf.DepName) prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("cp %s %s", depsFile, runtimeVolume.MountPath), ) } return v1.Container{ Name: "prepare", Image: prepareImage, Command: []string{"sh", "-c"}, Args: []string{prepareCommand}, VolumeMounts: []v1.VolumeMount{runtimeVolume, depsVolume}, ImagePullPolicy: v1.PullIfNotPresent, Resources: resources, }, nil } func addDefaultLabel(labels map[string]string) map[string]string { if labels == nil { labels = make(map[string]string) } labels["created-by"] = "kubeless" return labels } func 
hasDefaultLabel(labels map[string]string) bool { if labels == nil || labels["created-by"] != "kubeless" { return false } return true } func splitHandler(handler string) (string, string, error) { str := strings.Split(handler, ".") if len(str) != 2 { return "", "", fmt.Errorf("failed: incorrect handler format. It should be module_name.handler_name") } return str[0], str[1], nil } // getFileName returns a file name based on a handler identifier func getFileName(handler, funcContentType, runtime string, lr *langruntime.Langruntimes) (string, error) { modName, _, err := splitHandler(handler) if err != nil { return "", err } filename := modName if funcContentType == "text" || funcContentType == "" || funcContentType == "url" || funcContentType == "base64" { // We can only guess the extension if the function is specified as plain text runtimeInf, err := lr.GetRuntimeInfo(runtime) if err == nil { filename = modName + runtimeInf.FileNameSuffix } } return filename, nil } // EnsureFuncConfigMap creates/updates a config map with a function specification func EnsureFuncConfigMap(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference, lr *langruntime.Langruntimes) error { configMapData := map[string]string{} var err error if funcObj.Spec.Handler != "" { fileName, err := getFileName(funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, lr) if err != nil { return err } configMapData = map[string]string{ "handler": funcObj.Spec.Handler, fileName: funcObj.Spec.Function, } runtimeInfo, err := lr.GetRuntimeInfo(funcObj.Spec.Runtime) if err == nil && runtimeInfo.DepName != "" { configMapData[runtimeInfo.DepName] = funcObj.Spec.Deps } } configMap := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: funcObj.ObjectMeta.Name, Labels: addDefaultLabel(funcObj.ObjectMeta.Labels), OwnerReferences: or, }, Data: configMapData, } _, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Create(configMap) if err != nil && 
k8sErrors.IsAlreadyExists(err) { // In case the ConfigMap already exists we should update // just certain fields (to avoid race conditions) var newConfigMap *v1.ConfigMap newConfigMap, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{}) if err != nil { return err } if !hasDefaultLabel(newConfigMap.ObjectMeta.Labels) { return fmt.Errorf("Found a conflicting configmap object %s/%s. Aborting", funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name) } newConfigMap.ObjectMeta.Labels = funcObj.ObjectMeta.Labels newConfigMap.ObjectMeta.OwnerReferences = or newConfigMap.Data = configMap.Data _, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Update(newConfigMap) if err != nil && k8sErrors.IsAlreadyExists(err) { // The configmap may already exist and there is nothing to update return nil } } return err } // this function resolves backward incompatibility in case user uses old client which doesn't include serviceSpec into funcSpec. 
// if serviceSpec is empty, we will use the default serviceSpec whose port is 8080 func serviceSpec(funcObj *kubelessApi.Function) v1.ServiceSpec { if len(funcObj.Spec.ServiceSpec.Ports) == 0 { return v1.ServiceSpec{ Ports: []v1.ServicePort{ { // Note: Prefix: "http-" is added to adapt to Istio so that it can discover the function services Name: "http-function-port", Protocol: v1.ProtocolTCP, Port: 8080, TargetPort: intstr.FromInt(8080), }, }, Selector: funcObj.ObjectMeta.Labels, Type: v1.ServiceTypeClusterIP, } } return funcObj.Spec.ServiceSpec } // EnsureFuncService creates/updates a function service func EnsureFuncService(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference) error { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: funcObj.ObjectMeta.Name, Labels: addDefaultLabel(funcObj.ObjectMeta.Labels), OwnerReferences: or, }, Spec: serviceSpec(funcObj), } _, err := client.Core().Services(funcObj.ObjectMeta.Namespace).Create(svc) if err != nil && k8sErrors.IsAlreadyExists(err) { // In case the SVC already exists we should update // just certain fields (to avoid race conditions) var newSvc *v1.Service newSvc, err = client.Core().Services(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{}) if err != nil { return err } if !hasDefaultLabel(newSvc.ObjectMeta.Labels) { return fmt.Errorf("Found a conflicting service object %s/%s. 
Aborting", funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)
		}
		newSvc.ObjectMeta.Labels = funcObj.ObjectMeta.Labels
		newSvc.ObjectMeta.OwnerReferences = or
		newSvc.Spec.Ports = svc.Spec.Ports
		_, err = client.Core().Services(funcObj.ObjectMeta.Namespace).Update(newSvc)
		if err != nil && k8sErrors.IsAlreadyExists(err) {
			// The service may already exist and there is nothing to update
			return nil
		}
	}
	return err
}

// getRuntimeVolumeMount returns the volume mount where the function content
// (source plus dependencies) is exposed inside the runtime containers.
func getRuntimeVolumeMount(name string) v1.VolumeMount {
	return v1.VolumeMount{
		Name:      name,
		MountPath: "/kubeless",
	}
}

// getChecksum returns the hex-encoded sha256 digest of the given content.
func getChecksum(content string) (string, error) {
	h := sha256.New()
	_, err := h.Write([]byte(content))
	if err != nil {
		// BUGFIX: the original returned ("", nil) here, silently swallowing
		// the write error; propagate it to the caller instead.
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

// populatePodSpec populates a basic Pod Spec that uses init containers to populate
// the runtime container with the function content and its dependencies.
// The caller should define the runtime container(s).
// It accepts a prepopulated podSpec with default information and volume that the
// runtime container should mount
func populatePodSpec(funcObj *kubelessApi.Function, lr *langruntime.Langruntimes, podSpec *v1.PodSpec, runtimeVolumeMount v1.VolumeMount, provisionImage string, imagePullSecrets []v1.LocalObjectReference) error {
	depsVolumeName := funcObj.ObjectMeta.Name + "-deps"
	result := podSpec
	if len(imagePullSecrets) > 0 {
		result.ImagePullSecrets = imagePullSecrets
	}
	// Runtime volume (emptyDir) receives the prepared function; deps volume
	// is backed by the function's ConfigMap.
	result.Volumes = append(podSpec.Volumes,
		v1.Volume{
			Name: runtimeVolumeMount.Name,
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{},
			},
		},
		v1.Volume{
			Name: depsVolumeName,
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: funcObj.ObjectMeta.Name,
					},
				},
			},
		},
	)
	// prepare init-containers if some function is specified
	resources := v1.ResourceRequirements{}
	if len(funcObj.Spec.Deployment.Spec.Template.Spec.InitContainers) > 0 {
		resources =
funcObj.Spec.Deployment.Spec.Template.Spec.InitContainers[0].Resources } if funcObj.Spec.Function != "" { fileName, err := getFileName(funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, lr) if err != nil { return err } srcVolumeMount := v1.VolumeMount{ Name: depsVolumeName, MountPath: "/src", } provisionContainer, err := getProvisionContainer( funcObj.Spec.Function, funcObj.Spec.Checksum, fileName, funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, provisionImage, runtimeVolumeMount, srcVolumeMount, resources, lr, ) if err != nil { return err } result.InitContainers = []v1.Container{provisionContainer} } // add the image secrets if present to pull images from private docker registry if funcObj.Spec.Runtime != "" { imageSecrets, err := lr.GetImageSecrets(funcObj.Spec.Runtime) if err != nil { return fmt.Errorf("Unable to fetch ImagePullSecrets, %v", err) } result.ImagePullSecrets = append(result.ImagePullSecrets, imageSecrets...) } // ensure that the runtime is supported for installing dependencies _, err := lr.GetRuntimeInfo(funcObj.Spec.Runtime) envVars := []v1.EnvVar{} if len(result.Containers) > 0 { envVars = result.Containers[0].Env } hasDeps := funcObj.Spec.Deps != "" || strings.Contains(funcObj.Spec.FunctionContentType, "deps") if hasDeps && err != nil { return fmt.Errorf("Unable to install dependencies for the runtime %s", funcObj.Spec.Runtime) } else if hasDeps { depsChecksum := "" if funcObj.Spec.Deps != "" { depsChecksum, err = getChecksum(funcObj.Spec.Deps) if err != nil { return fmt.Errorf("Unable to obtain dependencies checksum: %v", err) } } depsInstallContainer, err := lr.GetBuildContainer(funcObj.Spec.Runtime, depsChecksum, envVars, runtimeVolumeMount, resources) if err != nil { return err } if depsInstallContainer.Name != "" { result.InitContainers = append( result.InitContainers, depsInstallContainer, ) } } // add compilation init container if needed _, funcName, _ := 
splitHandler(funcObj.Spec.Handler) compContainer, err := lr.GetCompilationContainer(funcObj.Spec.Runtime, funcName, envVars, runtimeVolumeMount, resources) if err != nil { return err } if compContainer != nil { result.InitContainers = append( result.InitContainers, *compContainer, ) } // mount volumes with init container secrets specified in runtime configuration lr.ReadConfigMap() for i := 0; i < len(result.InitContainers); i++ { secrets, err := lr.GetInitContainerSecrets(funcObj.Spec.Runtime, result.InitContainers[i].Name) if err != nil { return fmt.Errorf("Unable to fetch init container secrets for runtime %s at phase %s: %v", funcObj.Spec.Runtime, result.InitContainers[i].Name, err) } for _, secret := range secrets { // add volume if not available in the pod spec already var found bool for _, vol := range result.Volumes { if vol.Name == secret.Name && (vol.Secret == nil || vol.Secret.SecretName != secret.Name) { return fmt.Errorf("Unable to add volume for secret %s, volume already defined %#v", secret.Name, vol) } if vol.Name == secret.Name && vol.Secret != nil && vol.Secret.SecretName == secret.Name { found = true break } } if !found { result.Volumes = append(result.Volumes, v1.Volume{ Name: secret.Name, VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{SecretName: secret.Name}, }, }) } // add volume mount to the init container result.InitContainers[i].VolumeMounts = append(result.InitContainers[i].VolumeMounts, v1.VolumeMount{ Name: secret.Name, ReadOnly: true, MountPath: filepath.Join(secretsMountPath, secret.Name), }) } } return nil } // EnsureFuncImage creates a Job to build a function image func EnsureFuncImage(client kubernetes.Interface, funcObj *kubelessApi.Function, lr *langruntime.Langruntimes, or []metav1.OwnerReference, imageName, tag, builderImage, registryHost, dockerSecretName, provisionImage string, registryTLSEnabled bool, imagePullSecrets []v1.LocalObjectReference) error { if len(tag) < 64 { return errors.New("Expecting sha256 as 
image tag") } jobName := fmt.Sprintf("build-%s-%s", funcObj.ObjectMeta.Name, tag[0:10]) _, err := client.BatchV1().Jobs(funcObj.ObjectMeta.Namespace).Get(jobName, metav1.GetOptions{}) if err == nil { // The job already exists logrus.Infof("Found a previous job for building %s:%s", imageName, tag) return nil } podSpec := v1.PodSpec{ RestartPolicy: v1.RestartPolicyOnFailure, } runtimeVolumeMount := getRuntimeVolumeMount(funcObj.ObjectMeta.Name) err = populatePodSpec(funcObj, lr, &podSpec, runtimeVolumeMount, provisionImage, imagePullSecrets) if err != nil { return err } // Add a final initContainer to create the function bundle.tar prepareContainer := v1.Container{} for _, c := range podSpec.InitContainers { if c.Name == "prepare" { prepareContainer = c } } podSpec.InitContainers = append(podSpec.InitContainers, v1.Container{ Name: "bundle", Command: []string{"sh", "-c"}, Args: []string{fmt.Sprintf("tar cvf %s/bundle.tar %s/*", runtimeVolumeMount.MountPath, runtimeVolumeMount.MountPath)}, VolumeMounts: prepareContainer.VolumeMounts, Image: provisionImage, }) buildJob := batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, Namespace: funcObj.ObjectMeta.Namespace, OwnerReferences: or, Labels: addDefaultLabel(map[string]string{ "function": funcObj.ObjectMeta.Name, }), }, Spec: batchv1.JobSpec{ Template: v1.PodTemplateSpec{ Spec: podSpec, }, }, } baseImage, err := lr.GetFunctionImage(funcObj.Spec.Runtime) if err != nil { return err } // Registry volume dockerCredsVol := dockerSecretName dockerCredsVolMountPath := "/docker" registryCredsVolume := v1.Volume{ Name: dockerCredsVol, VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: dockerSecretName, }, }, } buildJob.Spec.Template.Spec.Volumes = append(buildJob.Spec.Template.Spec.Volumes, registryCredsVolume) args := []string{ "/imbuilder", "add-layer", } if !registryTLSEnabled { args = append(args, "--insecure") } args = append(args, "--src", fmt.Sprintf("docker://%s", baseImage), "--dst", 
fmt.Sprintf("docker://%s/%s:%s", registryHost, imageName, tag), fmt.Sprintf("%s/bundle.tar", podSpec.InitContainers[0].VolumeMounts[0].MountPath), ) // Add main container buildJob.Spec.Template.Spec.Containers = []v1.Container{ { Name: "build", Image: builderImage, VolumeMounts: append(prepareContainer.VolumeMounts, v1.VolumeMount{ Name: dockerCredsVol, MountPath: dockerCredsVolMountPath, }, ), Env: []v1.EnvVar{ { Name: "DOCKER_CONFIG_FOLDER", Value: dockerCredsVolMountPath, }, }, Args: args, }, } // Create the job if doesn't exists yet _, err = client.BatchV1().Jobs(funcObj.ObjectMeta.Namespace).Create(&buildJob) if err == nil { logrus.Infof("Started function build job %s", jobName) } return err } func svcTargetPort(funcObj *kubelessApi.Function) int32 { if len(funcObj.Spec.ServiceSpec.Ports) == 0 { return int32(8080) } return int32(funcObj.Spec.ServiceSpec.Ports[0].TargetPort.IntValue()) } func mergeMap(dst, src map[string]string) map[string]string { if len(dst) == 0 { dst = make(map[string]string) } for k, v := range src { dst[k] = v } return dst } // EnsureFuncDeployment creates/updates a function deployment func EnsureFuncDeployment(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference, lr *langruntime.Langruntimes, prebuiltRuntimeImage, provisionImage string, imagePullSecrets []v1.LocalObjectReference) error { var err error podAnnotations := map[string]string{ // Attempt to attract the attention of prometheus. 
// For runtimes that don't support /metrics, // prometheus will get a 404 and mostly silently // ignore the pod (still displayed in the list of // "targets") "prometheus.io/scrape": "true", "prometheus.io/path": "/metrics", "prometheus.io/port": strconv.Itoa(int(svcTargetPort(funcObj))), } maxUnavailable := intstr.FromInt(0) // add deployment and copy all func's Spec.Deployment to the deployment dpm := funcObj.Spec.Deployment.DeepCopy() dpm.OwnerReferences = or dpm.ObjectMeta.Name = funcObj.ObjectMeta.Name dpm.Spec.Selector = &metav1.LabelSelector{ MatchLabels: map[string]string{"created-by": funcObj.ObjectMeta.Labels["created-by"], "function": funcObj.ObjectMeta.Labels["function"]}, } dpm.Spec.Strategy = appsv1.DeploymentStrategy{ RollingUpdate: &appsv1.RollingUpdateDeployment{ MaxUnavailable: &maxUnavailable, }, } // append data to dpm deployment dpm.Labels = addDefaultLabel(mergeMap(dpm.Labels, funcObj.Labels)) dpm.Spec.Template.Labels = mergeMap(dpm.Spec.Template.Labels, funcObj.Labels) dpm.Annotations = mergeMap(dpm.Annotations, funcObj.Annotations) dpm.Spec.Template.Annotations = mergeMap(dpm.Spec.Template.Annotations, funcObj.Annotations) dpm.Spec.Template.Annotations = mergeMap(dpm.Spec.Template.Annotations, podAnnotations) if len(dpm.Spec.Template.Spec.Containers) == 0 { dpm.Spec.Template.Spec.Containers = append(dpm.Spec.Template.Spec.Containers, v1.Container{}) } runtimeVolumeMount := getRuntimeVolumeMount(funcObj.ObjectMeta.Name) if funcObj.Spec.Handler != "" && funcObj.Spec.Function != "" { modName, handlerName, err := splitHandler(funcObj.Spec.Handler) if err != nil { return err } // only resolve the image name and build the function if it has not been built already if dpm.Spec.Template.Spec.Containers[0].Image == "" && prebuiltRuntimeImage == "" { err := populatePodSpec(funcObj, lr, &dpm.Spec.Template.Spec, runtimeVolumeMount, provisionImage, imagePullSecrets) if err != nil { return err } imageName, err := lr.GetFunctionImage(funcObj.Spec.Runtime) if 
err != nil { return err } dpm.Spec.Template.Spec.Containers[0].Image = imageName dpm.Spec.Template.Spec.Containers[0].VolumeMounts = append(dpm.Spec.Template.Spec.Containers[0].VolumeMounts, runtimeVolumeMount) } else { if dpm.Spec.Template.Spec.Containers[0].Image == "" { dpm.Spec.Template.Spec.Containers[0].Image = prebuiltRuntimeImage } dpm.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets } timeout := funcObj.Spec.Timeout if timeout == "" { // Set default timeout to 180 seconds timeout = defaultTimeout } dpm.Spec.Template.Spec.Containers[0].Env = append(dpm.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{ Name: "FUNC_HANDLER", Value: handlerName, }, v1.EnvVar{ Name: "MOD_NAME", Value: modName, }, v1.EnvVar{ Name: "FUNC_TIMEOUT", Value: timeout, }, v1.EnvVar{ Name: "FUNC_RUNTIME", Value: funcObj.Spec.Runtime, }, v1.EnvVar{ Name: "FUNC_MEMORY_LIMIT", Value: dpm.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), }, ) } else { logrus.Warn("Expected non-empty handler and non-empty function content") } dpm.Spec.Template.Spec.Containers[0].Env = append(dpm.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{ Name: "FUNC_PORT", Value: strconv.Itoa(int(svcTargetPort(funcObj))), }, ) dpm.Spec.Template.Spec.Containers[0].Name = funcObj.ObjectMeta.Name dpm.Spec.Template.Spec.Containers[0].Ports = append(dpm.Spec.Template.Spec.Containers[0].Ports, v1.ContainerPort{ ContainerPort: svcTargetPort(funcObj), }) // update deployment for loading dependencies lr.UpdateDeployment(dpm, runtimeVolumeMount.MountPath, funcObj.Spec.Runtime) livenessProbeInfo := lr.GetLivenessProbeInfo(funcObj.Spec.Runtime, int(svcTargetPort(funcObj))) if dpm.Spec.Template.Spec.Containers[0].LivenessProbe == nil { dpm.Spec.Template.Spec.Containers[0].LivenessProbe = livenessProbeInfo } // Add security context runtimeUser := int64(1000) if dpm.Spec.Template.Spec.SecurityContext == nil { dpm.Spec.Template.Spec.SecurityContext = &v1.PodSecurityContext{ RunAsUser: &runtimeUser, FSGroup: 
&runtimeUser, } } // Add soft pod anti affinity if dpm.Spec.Template.Spec.Affinity == nil { dpm.Spec.Template.Spec.Affinity = &v1.Affinity{ PodAntiAffinity: &v1.PodAntiAffinity{ PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ { Weight: 100, PodAffinityTerm: v1.PodAffinityTerm{ LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "created-by": "kubeless", "function": funcObj.ObjectMeta.Name, }, }, TopologyKey: "kubernetes.io/hostname", }, }, }, }, } } _, err = client.AppsV1().Deployments(funcObj.ObjectMeta.Namespace).Create(dpm) if err != nil && k8sErrors.IsAlreadyExists(err) { // In case the Deployment already exists we should update // just certain fields (to avoid race conditions) var newDpm *appsv1.Deployment newDpm, err = client.AppsV1().Deployments(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{}) if err != nil { return err } if !hasDefaultLabel(newDpm.ObjectMeta.Labels) { return fmt.Errorf("Found a conflicting deployment object %s/%s. 
Aborting", funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)
		}
		newDpm.ObjectMeta.Labels = funcObj.ObjectMeta.Labels
		newDpm.ObjectMeta.Annotations = funcObj.Spec.Deployment.ObjectMeta.Annotations
		newDpm.ObjectMeta.OwnerReferences = or
		// We should maintain previous selector to avoid duplicated ReplicaSets
		selector := newDpm.Spec.Selector
		newDpm.Spec = dpm.Spec
		newDpm.Spec.Selector = selector
		data, err := json.Marshal(newDpm)
		if err != nil {
			return err
		}
		// Use `Patch` to do a rolling update
		_, err = client.AppsV1().Deployments(funcObj.ObjectMeta.Namespace).Patch(newDpm.Name, types.MergePatchType, data)
		if err != nil {
			return err
		}
	}
	return err
}

// CreateServiceMonitor creates a Service Monitor for the given function
func CreateServiceMonitor(smclient monitoringv1alpha1.MonitoringV1alpha1Client, funcObj *kubelessApi.Function, ns string, or []metav1.OwnerReference) error {
	_, err := smclient.ServiceMonitors(ns).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})
	if err != nil {
		// BUGFIX: the original returned nil for any non-NotFound error from
		// Get (e.g. an API connectivity failure), silently skipping the
		// monitor creation; propagate those errors instead.
		if !k8sErrors.IsNotFound(err) {
			return err
		}
		s := &monitoringv1alpha1.ServiceMonitor{
			ObjectMeta: metav1.ObjectMeta{
				Name:      funcObj.ObjectMeta.Name,
				Namespace: ns,
				Labels: addDefaultLabel(map[string]string{
					"service-monitor": "function",
				}),
				OwnerReferences: or,
			},
			Spec: monitoringv1alpha1.ServiceMonitorSpec{
				Selector: metav1.LabelSelector{
					MatchLabels: map[string]string{
						"function": funcObj.ObjectMeta.Name,
					},
				},
				Endpoints: []monitoringv1alpha1.Endpoint{
					{
						Port: "http-function-port",
					},
				},
			},
		}
		_, err = smclient.ServiceMonitors(ns).Create(s)
		if err != nil {
			return err
		}
		return nil
	}
	// Get succeeded: a monitor for this function is already present.
	return fmt.Errorf("service monitor already exists")
}

// GetOwnerReference returns ownerRef for appending to objects's metadata
func GetOwnerReference(kind, apiVersion, name string, uid types.UID) ([]metav1.OwnerReference, error) {
	if name == "" {
		return []metav1.OwnerReference{}, fmt.Errorf("name can't be empty")
	}
	if uid == "" {
		return []metav1.OwnerReference{}, fmt.Errorf("uid can't be empty")
	}
	return
[]metav1.OwnerReference{ { Kind: kind, APIVersion: apiVersion, Name: name, UID: uid, }, }, nil } // GetInClusterConfig returns necessary Config object to authenticate k8s clients if env variable is set func GetInClusterConfig() (*rest.Config, error) { config, err := rest.InClusterConfig() tokenFile := os.Getenv("KUBELESS_TOKEN_FILE_PATH") if len(tokenFile) == 0 { return config, err } tokenBytes, err := ioutil.ReadFile(tokenFile) if err != nil { return nil, fmt.Errorf("unable to read file containing oauth token: %s", err) } config.BearerToken = string(tokenBytes) return config, nil } func getConfigLocation(apiExtensionsClientset clientsetAPIExtensions.Interface) (ConfigLocation, error) { configLocation := ConfigLocation{} controllerNamespace := os.Getenv("KUBELESS_NAMESPACE") kubelessConfig := os.Getenv("KUBELESS_CONFIG") annotationsCRD, err := GetAnnotationsFromCRD(apiExtensionsClientset, "functions.kubeless.io") if err != nil { return configLocation, err } if len(controllerNamespace) == 0 { if ns, ok := annotationsCRD["kubeless.io/namespace"]; ok { controllerNamespace = ns } else { controllerNamespace = "kubeless" } } configLocation.Namespace = controllerNamespace if len(kubelessConfig) == 0 { if config, ok := annotationsCRD["kubeless.io/config"]; ok { kubelessConfig = config } else { kubelessConfig = "kubeless-config" } } configLocation.Name = kubelessConfig return configLocation, nil } // GetKubelessConfig Returns Kubeless ConfigMap func GetKubelessConfig(cli kubernetes.Interface, cliAPIExtensions clientsetAPIExtensions.Interface) (*v1.ConfigMap, error) { configLocation, err := getConfigLocation(cliAPIExtensions) if err != nil { return nil, fmt.Errorf("Error while fetching config location: %v", err) } controllerNamespace := configLocation.Namespace kubelessConfig := configLocation.Name config, err := cli.CoreV1().ConfigMaps(controllerNamespace).Get(kubelessConfig, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("Unable to read the configmap: %s", 
			err)
	}
	return config, nil
}

// DryRunFmt stringify the given interface in a specific format.
// Supported formats are "json" (indented) and "yaml"; anything else errors.
func DryRunFmt(format string, trigger interface{}) (string, error) {
	switch format {
	case "json":
		j, err := json.MarshalIndent(trigger, "", " ")
		if err != nil {
			return "", err
		}
		return string(j[:]), nil
	case "yaml":
		y, err := yaml.Marshal(trigger)
		if err != nil {
			return "", err
		}
		return string(y[:]), nil
	default:
		return "", fmt.Errorf("Output format needs to be yaml or json")
	}
}

// getCompressionType returns the compression type (if any) of the given file by looking at the file extension.
// ".zip" maps to "+zip"; the various tar suffixes map to "+compressedtar";
// anything else returns the empty string.
func getCompressionType(filename string) (compressionType string) {
	if strings.HasSuffix(filename, ".zip") {
		compressionType = "+zip"
	}
	extensions := []string{".tar.gz", ".taz", ".tgz", ".tar.bz2", ".tb2", ".tbz", ".tbz2", ".tz2", ".tar.xz"}
	for _, ext := range extensions {
		if strings.HasSuffix(filename, ext) {
			compressionType = "+compressedtar"
			break
		}
	}
	return
}

// GetContentType Gets the content type of a given filename.
// URLs yield "url[+zip|+compressedtar]" (query string stripped before the
// extension check); local files are read and classified as "text" when they
// are valid UTF-8, "base64" otherwise, plus the compression suffix.
func GetContentType(filename string) (string, error) {
	var contentType string
	if strings.Index(filename, "http://") == 0 || strings.Index(filename, "https://") == 0 {
		contentType = "url" + getCompressionType(strings.Split(filename, "?")[0])
	} else {
		fbytes, err := ioutil.ReadFile(filename)
		if err != nil {
			return "", err
		}
		isText := utf8.ValidString(string(fbytes))
		if isText {
			contentType = "text"
		} else {
			contentType = "base64"
		}
		contentType += getCompressionType(filename)
	}
	return contentType, nil
}

// ParseContent Parses the content of a file as string.
// It returns (content, checksum, error): for "url" content types the file is
// downloaded and returned verbatim; local files are returned as text or
// base64 depending on contentType. The checksum is "sha256:<hex>".
func ParseContent(file, contentType string) (string, string, error) {
	var checksum, content string
	if strings.Contains(contentType, "url") {
		functionURL, err := url.Parse(file)
		if err != nil {
			return "", "", err
		}
		resp, err := http.Get(functionURL.String())
		if err != nil {
			return "", "", err
		}
		defer resp.Body.Close()
		functionBytes, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "", "", err
		}
		content = string(functionBytes)
		checksum, err = getSha256(functionBytes)
		if err != nil {
			return "", "", err
		}
	} else {
		functionBytes, err := ioutil.ReadFile(file)
		if err != nil {
			return "", "", err
		}
		if contentType == "text" {
			content = string(functionBytes)
		} else {
			content = base64.StdEncoding.EncodeToString(functionBytes)
		}
		checksum, err = getFileSha256(file)
		if err != nil {
			return "", "", err
		}
	}
	return content, checksum, nil
}

// Get the checksum of a file using sha256
func getFileSha256(file string) (string, error) {
	h := sha256.New()
	ff, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer ff.Close()
	_, err = io.Copy(h, ff)
	if err != nil {
		return "", err
	}
	checksum := hex.EncodeToString(h.Sum(nil))
	return "sha256:" + checksum, err
}

// Get the checksum using sha256
func getSha256(bytes []byte) (string, error) {
	h := sha256.New()
	_, err := h.Write(bytes)
	if err != nil {
		return "", err
	}
	checksum := hex.EncodeToString(h.Sum(nil))
	return "sha256:" + checksum, nil
}

================================================
FILE: pkg/utils/kubelessutil_test.go
================================================
package utils

import (
	"reflect"
	"strconv"
	"strings"
	"testing"

	kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1"
	"github.com/kubeless/kubeless/pkg/langruntime"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes/fake"
)

// getEnvValueFromList returns the value of the env var named envName from the
// given list, or "" when it is not present.
func getEnvValueFromList(envName string, l []v1.EnvVar) string {
	var res v1.EnvVar
	for _, env := range l {
		if env.Name == envName {
			res = env
			break
		}
	}
	return res.Value
}

// TestEnsureConfigMap verifies that EnsureFuncConfigMap creates a ConfigMap
// with the handler, source file and dependencies of the function.
func TestEnsureConfigMap(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "kubeless.io/v1beta1",
		},
	}
	ns := "default"
	funcLabels := map[string]string{
		"foo": "bar",
	}
	f1Name := "f1"
	f1 := &kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:
f1Name,
			Namespace: ns,
			Labels:    funcLabels,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function",
			Deps:     "deps",
			Handler:  "foo.bar",
			Runtime:  "python2.7",
		},
	}
	langruntime.AddFakeConfig(clientset)
	lr := langruntime.SetupLangRuntime(clientset)
	lr.ReadConfigMap()
	err := EnsureFuncConfigMap(clientset, f1, or, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	cm, err := clientset.CoreV1().ConfigMaps(ns).Get(f1Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedCM := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:            f1Name,
			Namespace:       ns,
			Labels:          funcLabels,
			OwnerReferences: or,
		},
		Data: map[string]string{
			"handler":          "foo.bar",
			"foo.py":           "function",
			"requirements.txt": "deps",
		},
	}
	if !reflect.DeepEqual(*cm, expectedCM) {
		t.Errorf("Unexpected ConfigMap:\n %+v\nExpecting:\n %+v", *cm, expectedCM)
	}
}

// TestEnsureFuncMapWithoutDeps checks that the dependencies file is skipped
// for runtimes that do not support it, and that an existing ConfigMap gets
// updated on a second call.
func TestEnsureFuncMapWithoutDeps(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "kubeless.io/v1beta1",
		},
	}
	ns := "default"
	langruntime.AddFakeConfig(clientset)
	lr := langruntime.SetupLangRuntime(clientset)
	lr.ReadConfigMap()
	// It should skip the dependencies field in case it is not supported
	f2Name := "f2"
	f2 := &kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f2Name,
			Namespace: ns,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function",
			Handler:  "foo.bar",
			Runtime:  "cobol",
		},
	}
	err := EnsureFuncConfigMap(clientset, f2, or, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	cm, err := clientset.CoreV1().ConfigMaps(ns).Get(f2Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedData := map[string]string{
		"handler": "foo.bar",
		"foo":     "function",
	}
	if !reflect.DeepEqual(cm.Data, expectedData) {
		t.Errorf("Unexpected ConfigMap:\n %+v\nExpecting:\n %+v", cm.Data, expectedData)
	}
	// If there is already a config map it should update the previous one
	f2 = &kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f2Name,
			Namespace: ns,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function2",
			Handler:  "foo2.bar2",
			Runtime:  "python3.4",
		},
	}
	err = EnsureFuncConfigMap(clientset, f2, or, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	cm, err = clientset.CoreV1().ConfigMaps(ns).Get(f2Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedData = map[string]string{
		"handler":          "foo2.bar2",
		"foo2.py":          "function2",
		"requirements.txt": "",
	}
	if !reflect.DeepEqual(cm.Data, expectedData) {
		t.Errorf("Unexpected ConfigMap:\n %+v\nExpecting:\n %+v", cm.Data, expectedData)
	}
}

// TestAvoidConfigMapOverwrite ensures a ConfigMap that was not created by
// kubeless is never overwritten by EnsureFuncConfigMap.
func TestAvoidConfigMapOverwrite(t *testing.T) {
	f1Name := "f1"
	clientset, or, ns, lr := prepareDeploymentTest(f1Name)
	clientset.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f1Name,
			Namespace: ns,
		},
	})
	f1 := getDefaultFunc(f1Name, ns)
	err := EnsureFuncConfigMap(clientset, f1, or, lr)
	// BUG FIX: the original condition `err == nil && strings.Contains(err.Error(), ...)`
	// dereferences a nil error (panic) when err is nil, and can never flag a
	// failure when err is non-nil. A conflict must simply yield a non-nil error.
	if err == nil {
		t.Errorf("It should fail because a conflict")
	}
}

// TestEnsureFileNames checks the file name generated in the ConfigMap for
// every supported content type (plain, base64, url and their compressed forms).
func TestEnsureFileNames(t *testing.T) {
	tests := []struct {
		name           string
		contentType    string
		fileNameSuffix string
	}{
		{name: "text", contentType: "text", fileNameSuffix: ".py"},
		{name: "empty", contentType: "", fileNameSuffix: ".py"},
		{name: "base64", contentType: "base64", fileNameSuffix: ".py"},
		{name: "url", contentType: "url", fileNameSuffix: ".py"},
		{name: "text+zip", contentType: "text+zip", fileNameSuffix: ""},
		{name: "text+compressedtar", contentType: "text+compressedtar", fileNameSuffix: ""},
		{name: "base64+zip", contentType: "base64+zip", fileNameSuffix: ""},
		{name: "base64+compressedtar", contentType: "base64+compressedtar", fileNameSuffix: ""},
		{name: "url+zip", contentType: "url+zip", fileNameSuffix: ""},
		{name: "url+compressedtar", contentType: "url+compressedtar", fileNameSuffix: ""},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			clientset := fake.NewSimpleClientset()
			or
:= []metav1.OwnerReference{
				{
					Kind:       "Function",
					APIVersion: "kubeless.io/v1beta1",
				},
			}
			ns := "default"
			f1Name := "f1"
			f1Runtime := "python"
			f1 := &kubelessApi.Function{
				ObjectMeta: metav1.ObjectMeta{
					Name:      f1Name,
					Namespace: ns,
				},
				Spec: kubelessApi.FunctionSpec{
					Function:            "function",
					Handler:             "foo.bar",
					FunctionContentType: test.contentType,
					Runtime:             f1Runtime,
				},
			}
			langruntime.AddFakeConfig(clientset)
			lr := langruntime.SetupLangRuntime(clientset)
			lr.ReadConfigMap()
			err := EnsureFuncConfigMap(clientset, f1, or, lr)
			if err != nil {
				t.Errorf("Unexpected error: %s", err)
			}
			cm, err := clientset.CoreV1().ConfigMaps(ns).Get(f1Name, metav1.GetOptions{})
			if err != nil {
				t.Errorf("Unexpected error: %s", err)
			}
			expectedData := map[string]string{
				"requirements.txt":          "",
				"handler":                   "foo.bar",
				"foo" + test.fileNameSuffix: "function",
			}
			if !reflect.DeepEqual(cm.Data, expectedData) {
				t.Errorf("Unexpected ConfigMap:\n %+v\nExpecting:\n %+v", cm.Data, expectedData)
			}
		})
	}
}

// TestEnsureService verifies the Service created for a function: single
// ClusterIP port 8080 named "http-function-port" selecting the func labels.
func TestEnsureService(t *testing.T) {
	fakeSvc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "myns",
			Name:      "foo",
		},
	}
	clientset := fake.NewSimpleClientset(&fakeSvc)
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "kubeless.io/v1beta1",
		},
	}
	ns := "default"
	funcLabels := map[string]string{
		"foo": "bar",
	}
	f1Name := "f1"
	f1 := &kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f1Name,
			Namespace: ns,
			Labels:    funcLabels,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function",
			Deps:     "deps",
			Handler:  "foo.bar",
			Runtime:  "python2.7",
		},
	}
	err := EnsureFuncService(clientset, f1, or)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	svc, err := clientset.CoreV1().Services(ns).Get(f1Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedSVC := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:            f1Name,
			Namespace:       ns,
			Labels:          funcLabels,
			OwnerReferences: or,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{
					Name:       "http-function-port",
					Port:       8080,
					TargetPort: intstr.FromInt(8080),
					NodePort:   0,
					Protocol:   v1.ProtocolTCP,
				},
			},
			Selector: funcLabels,
			Type:     v1.ServiceTypeClusterIP,
		},
	}
	if !reflect.DeepEqual(*svc, expectedSVC) {
		t.Errorf("Unexpected service:\n %+v\nExpecting:\n %+v", *svc, expectedSVC)
	}
}

// TestUpdateFuncSvc checks that a second EnsureFuncService call updates the
// labels of an existing Service but keeps the original selector.
func TestUpdateFuncSvc(t *testing.T) {
	fakeSvc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "myns",
			Name:      "foo",
		},
	}
	clientset := fake.NewSimpleClientset(&fakeSvc)
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "kubeless.io/v1beta1",
		},
	}
	ns := "default"
	// If there is already a service it should update the previous one
	funcLabels := map[string]string{
		"foo": "bar",
	}
	f1Name := "f1"
	f1 := &kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f1Name,
			Namespace: ns,
			Labels:    funcLabels,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function",
			Deps:     "deps",
			Handler:  "foo.bar",
			Runtime:  "python2.7",
		},
	}
	err := EnsureFuncService(clientset, f1, or)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	newLabels := map[string]string{
		"foobar": "barfoo",
	}
	f1.ObjectMeta.Labels = newLabels
	err = EnsureFuncService(clientset, f1, or)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	svc, err := clientset.CoreV1().Services(ns).Get(f1Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !reflect.DeepEqual(svc.ObjectMeta.Labels, newLabels) {
		t.Error("Unable to update the service")
	}
	if reflect.DeepEqual(svc.Spec.Selector, newLabels) {
		t.Error("It should not update the selector")
	}
}

// TestAvoidServiceOverwrite ensures a Service not created by kubeless is
// never overwritten by EnsureFuncService.
func TestAvoidServiceOverwrite(t *testing.T) {
	f1Name := "f1"
	ns := "default"
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "kubeless.io/v1beta1",
		},
	}
	clientset := fake.NewSimpleClientset()
	clientset.CoreV1().Services(ns).Create(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f1Name,
			Namespace: ns,
		},
	})
	f1 := getDefaultFunc(f1Name, ns)
	err := EnsureFuncService(clientset, f1, or)
	// BUG FIX: `err == nil && strings.Contains(err.Error(), ...)` panics when
	// err is nil and can never report a failure otherwise.
	if err == nil {
		t.Errorf("It should fail because a conflict")
	}
}

// TestEnsureImage verifies the image build Job created by EnsureFuncImage:
// builder/init images, DOCKER_CONFIG_FOLDER env var, image pull secrets and
// the my-secret volume mount on the install init container.
func TestEnsureImage(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	langruntime.AddFakeConfig(clientset)
	lr := langruntime.SetupLangRuntime(clientset)
	lr.ReadConfigMap()
	ns := "default"
	f1Name := "f1"
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "kubeless.io/v1beta1",
		},
	}
	f1 := &kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f1Name,
			Namespace: ns,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function",
			Deps:     "deps",
			Handler:  "foo.bar",
			Runtime:  "python2.7",
		},
	}
	// Testing happy path
	pullSecrets := []v1.LocalObjectReference{
		{Name: "creds"},
	}
	err := EnsureFuncImage(clientset, f1, lr, or, "user/image", "4840d87600137157493ba43a24f0b4bb6cf524ebbf095ce96c79f85bf5a3ff5a", "kubeless/builder", "registry.docker.io", "registry-creds", "unzip", true, pullSecrets)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	jobs, err := clientset.BatchV1().Jobs(ns).List(metav1.ListOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if len(jobs.Items) != 1 {
		t.Errorf("It should have created the build job")
	}
	buildContainer := jobs.Items[0].Spec.Template.Spec.Containers[0]
	if buildContainer.Image != "kubeless/builder" {
		t.Errorf("Image %s of build job is not recognised", jobs.Items[0].Spec.Template.Spec.Containers[0].Image)
	}
	dockerConfigFolder := ""
	for _, envvar := range buildContainer.Env {
		if envvar.Name == "DOCKER_CONFIG_FOLDER" {
			dockerConfigFolder = envvar.Value
		}
	}
	if dockerConfigFolder == "" {
		t.Error("Builder image relies on the env var DOCKER_CONFIG_FOLDER to authenticate")
	}
	initContainer := jobs.Items[0].Spec.Template.Spec.InitContainers[0]
	if initContainer.Image != "unzip" {
		t.Errorf("Unexpected init image %s", initContainer.Image)
	}
	// BUG FIX: the original assertion was inverted — it reported "Missing
	// ImagePullSecrets" exactly when the secrets DID equal the expected list.
	// Check containment instead so extra registry secrets are tolerated.
	jobSecrets := jobs.Items[0].Spec.Template.Spec.ImagePullSecrets
	for _, expected := range pullSecrets {
		present := false
		for _, s := range jobSecrets {
			if s == expected {
				present = true
				break
			}
		}
		if !present {
			t.Errorf("Missing ImagePullSecret %q", expected.Name)
		}
	}
	// ensure my-secret is mounted as /var/run/secrets/kubeless.io/my-secret to install container
	var container v1.Container
	for _, c := range jobs.Items[0].Spec.Template.Spec.InitContainers {
		if c.Name == "install" {
			container = c
		}
	}
	if len(container.Name) == 0 {
		t.Fatalf("Cannot find init container %q", "install")
	}
	var found bool
	for _, v := range container.VolumeMounts {
		if v.MountPath == "/var/run/secrets/kubeless.io/my-secret" {
			found = true
		}
	}
	if !found {
		t.Fatalf("Cannot find volume mount /var/run/secrets/kubeless.io/my-secret")
	}
}

// getDefaultFunc builds a minimal python2.7 Function fixture with one service
// port (8080) and one container env var.
func getDefaultFunc(name, ns string) *kubelessApi.Function {
	fPort := int32(8080)
	f := kubelessApi.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: kubelessApi.FunctionSpec{
			Function: "function",
			Deps:     "deps",
			Handler:  "foo.bar",
			Runtime:  "python2.7",
			ServiceSpec: v1.ServiceSpec{
				Ports: []v1.ServicePort{
					{
						Name:       "http-function-port",
						Port:       fPort,
						TargetPort: intstr.FromInt(int(fPort)),
						NodePort:   0,
						Protocol:   v1.ProtocolTCP,
					},
				},
				Type: v1.ServiceTypeClusterIP,
			},
			Deployment: appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							Containers: []v1.Container{
								{
									Env: []v1.EnvVar{
										{
											Name:  "foo",
											Value: "bar",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	return &f
}

// prepareDeploymentTest returns the shared fixtures used by the deployment
// tests: a fake clientset with the fake langruntime config loaded.
func prepareDeploymentTest(funcName string) (*fake.Clientset, []metav1.OwnerReference, string, *langruntime.Langruntimes) {
	clientset := fake.NewSimpleClientset()
	or := []metav1.OwnerReference{
		{
			Kind:       "Function",
			APIVersion: "k8s.io",
		},
	}
	ns := "default"
	langruntime.AddFakeConfig(clientset)
	lr := langruntime.SetupLangRuntime(clientset)
	lr.ReadConfigMap()
	return clientset, or, ns, lr
}

// TestEnsureDeployment verifies the full Deployment generated for a function:
// metadata, prometheus annotations, container spec, anti-affinity, pull
// secrets and init containers.
func TestEnsureDeployment(t *testing.T) {
	f1Name := "f1"
	clientset, or, ns, lr := prepareDeploymentTest(f1Name)
	funcLabels := map[string]string{
		"foo": "bar",
	}
	funcAnno := map[string]string{
		"bar": "foo",
	}
	f1 := getDefaultFunc(f1Name, ns)
	f1.Spec.Deployment.Spec.Template.Spec.InitContainers = []v1.Container{
		{
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceLimitsCPU: resource.MustParse("100m"),
				},
			},
		},
	}
	f1Port :=
f1.Spec.ServiceSpec.Ports[0].Port
	f1.ObjectMeta.Labels = funcLabels
	f1.Spec.Deployment.ObjectMeta = metav1.ObjectMeta{
		Annotations: funcAnno,
	}
	f1.Spec.Deployment.Spec.Template.ObjectMeta = metav1.ObjectMeta{
		Annotations: funcAnno,
	}
	// Testing happy path
	pullSecrets := []v1.LocalObjectReference{
		{Name: "creds"},
	}
	err := EnsureFuncDeployment(clientset, f1, or, lr, "", "unzip", pullSecrets)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(f1Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedObjectMeta := metav1.ObjectMeta{
		Name:            f1Name,
		Namespace:       ns,
		Labels:          addDefaultLabel(funcLabels),
		OwnerReferences: or,
		Annotations:     funcAnno,
	}
	if !reflect.DeepEqual(dpm.ObjectMeta, expectedObjectMeta) {
		t.Errorf("Unable to set metadata. Received:\n %+v\nExpecting:\n %+v", dpm.ObjectMeta, expectedObjectMeta)
	}
	expectedAnnotations := map[string]string{
		"prometheus.io/scrape": "true",
		"prometheus.io/path":   "/metrics",
		"prometheus.io/port":   strconv.Itoa(int(f1Port)),
		"bar":                  "foo",
	}
	for i := range expectedAnnotations {
		if dpm.Spec.Template.Annotations[i] != expectedAnnotations[i] {
			t.Errorf("Expecting annotation %s but received %s", expectedAnnotations[i], dpm.Spec.Template.Annotations[i])
		}
	}
	if dpm.Spec.Template.Annotations["bar"] != "foo" {
		t.Error("Unable to set annotations")
	}
	expectedContainer := v1.Container{
		Name:  f1Name,
		Image: "bar",
		Ports: []v1.ContainerPort{
			{
				ContainerPort: int32(f1Port),
			},
		},
		Env: []v1.EnvVar{
			{
				Name:  "foo",
				Value: "bar",
			},
			{
				Name:  "FUNC_HANDLER",
				Value: "bar",
			},
			{
				Name:  "MOD_NAME",
				Value: "foo",
			},
			{
				Name:  "FUNC_TIMEOUT",
				Value: "180",
			},
			{
				Name:  "FUNC_RUNTIME",
				Value: "python2.7",
			},
			{
				Name:  "FUNC_MEMORY_LIMIT",
				Value: "0",
			},
			{
				Name:  "FUNC_PORT",
				Value: strconv.Itoa(int(f1Port)),
			},
			{
				Name:  "KUBELESS_INSTALL_VOLUME",
				Value: "/kubeless",
			},
			{
				Name:  "PYTHONPATH",
				Value: "/kubeless/lib/python2.7/site-packages:/kubeless",
			},
		},
		VolumeMounts: []v1.VolumeMount{
			{
				Name:      f1Name,
				MountPath: "/kubeless",
			},
		},
		LivenessProbe: &v1.Probe{
			InitialDelaySeconds: int32(5),
			PeriodSeconds:       int32(10),
			Handler: v1.Handler{
				Exec: &v1.ExecAction{
					Command: []string{"curl", "-f", "http://localhost:8080/healthz"},
				},
			},
		},
	}
	if !reflect.DeepEqual(dpm.Spec.Template.Spec.Containers[0], expectedContainer) {
		t.Errorf("Unexpected container definition. Received:\n %+v\nExpecting:\n %+v", dpm.Spec.Template.Spec.Containers[0], expectedContainer)
	}
	expectedAffinity := &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
				{
					Weight: 100,
					PodAffinityTerm: v1.PodAffinityTerm{
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: map[string]string{
								"created-by": "kubeless",
								"function":   f1Name,
							},
						},
						TopologyKey: "kubernetes.io/hostname",
					},
				},
			},
		},
	}
	if !reflect.DeepEqual(dpm.Spec.Template.Spec.Affinity, expectedAffinity) {
		t.Errorf("Unexpected pod affinity definition. Received:\n %+v\nExpecting:\n %+v", dpm.Spec.Template.Spec.Affinity, expectedAffinity)
	}
	secrets := dpm.Spec.Template.Spec.ImagePullSecrets
	// BUG FIX: the original used `&&` between the three checks, so the test
	// could only fail when ALL secrets were wrong; a single wrong secret went
	// unnoticed. Guard the indices and use `||`.
	if len(secrets) < 3 || secrets[0].Name != "creds" || secrets[1].Name != "p1" || secrets[2].Name != "p2" {
		t.Errorf("Expected ImagePullSecrets to start with [creds p1 p2] but found %v", secrets)
	}
	// Init containers behavior should be tested with integration tests
	if len(dpm.Spec.Template.Spec.InitContainers) < 1 {
		t.Errorf("Expecting at least an init container to install deps")
	}
	if dpm.Spec.Template.Spec.InitContainers[0].Image != "unzip" {
		t.Errorf("Unexpected init image %s", dpm.Spec.Template.Spec.InitContainers[0].Image)
	}
	if dpm.Spec.Template.Spec.InitContainers[0].Resources.Limits == nil {
		t.Errorf("Resources must be set for init container")
	}
	// ensure my-secret is mounted as /var/run/secrets/kubeless.io/my-secret to install container
	var container v1.Container
	for _, c := range dpm.Spec.Template.Spec.InitContainers {
		if c.Name == "install" {
			container = c
		}
	}
	if len(container.Name) == 0 {
		t.Fatalf("Cannot find init container %q", "install")
	}
	var found bool
	for _, v := range container.VolumeMounts {
		if v.MountPath == "/var/run/secrets/kubeless.io/my-secret" {
			found = true
		}
	}
	if !found {
		t.Fatalf("Cannot find volume mount /var/run/secrets/kubeless.io/my-secret")
	}
}

// TestEnsureDeploymentWithoutFuncNorHandler checks that an empty function and
// handler still produce a Deployment.
func TestEnsureDeploymentWithoutFuncNorHandler(t *testing.T) {
	funcName := "func2"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// If no handler and function is given it should not fail
	f2 := getDefaultFunc(funcName, ns)
	f2.Spec.Function = ""
	f2.Spec.Handler = ""
	err := EnsureFuncDeployment(clientset, f2, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	_, err = clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
}

// TestEnsureDeploymentWithImage checks a user-provided container image is kept.
func TestEnsureDeploymentWithImage(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// If the Image has been already provided it should not resolve it
	f3 := getDefaultFunc(funcName, ns)
	f3.Spec.Deployment.Spec.Template.Spec.Containers[0].Image = "test-image"
	err := EnsureFuncDeployment(clientset, f3, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if dpm.Spec.Template.Spec.Containers[0].Image != "test-image" {
		t.Errorf("Unexpected Image Name: %s", dpm.Spec.Template.Spec.Containers[0].Image)
	}
}

// TestEnsureDeploymentWithoutFunc checks that no init container is added when
// there is neither function source nor dependencies to provision.
func TestEnsureDeploymentWithoutFunc(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// If no function is given it should not use an init container
	f4 := getDefaultFunc(funcName, ns)
	f4.Spec.Function = ""
	f4.Spec.Deps = ""
	err := EnsureFuncDeployment(clientset, f4, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if len(dpm.Spec.Template.Spec.InitContainers) > 0 {
		t.Error("It should not setup an init container")
	}
}

// TestEnsureUpdateDeployment checks that a second EnsureFuncDeployment call on
// an existing deployment does not fail.
func TestEnsureUpdateDeployment(t *testing.T) {
	f1Name := "f1"
	clientset, or, ns, lr := prepareDeploymentTest(f1Name)
	// It should update a deployment if it is already present
	funcAnno := map[string]string{
		"bar": "foo",
	}
	f1 := getDefaultFunc(f1Name, ns)
	f1.Spec.Deployment.ObjectMeta = metav1.ObjectMeta{
		Annotations: funcAnno,
	}
	f1.Spec.Deployment.Spec.Template.ObjectMeta = metav1.ObjectMeta{
		Annotations: funcAnno,
	}
	err := EnsureFuncDeployment(clientset, f1, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	f6 := kubelessApi.Function{}
	f6 = *f1
	f6.Spec.Handler = "foo.bar2"
	f6.Spec.Deployment.ObjectMeta.Annotations["new-key"] = "value"
	err = EnsureFuncDeployment(clientset, &f6, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	// Unable to ensure that the new deployment is patched since fake
	// ignores PATCH actions: https://github.com/kubernetes/client-go/issues/364
}

// TestAvoidDeploymentOverwrite ensures a Deployment not created by kubeless is
// never overwritten by EnsureFuncDeployment.
func TestAvoidDeploymentOverwrite(t *testing.T) {
	f1Name := "f1"
	clientset, or, ns, lr := prepareDeploymentTest(f1Name)
	clientset.AppsV1().Deployments(ns).Create(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      f1Name,
			Namespace: ns,
		},
	})
	f1 := getDefaultFunc(f1Name, ns)
	err := EnsureFuncDeployment(clientset, f1, or, lr, "", "unzip", []v1.LocalObjectReference{})
	// BUG FIX: `err == nil && strings.Contains(err.Error(), ...)` panics when
	// err is nil and can never report a failure otherwise.
	if err == nil {
		t.Errorf("It should fail because a conflict")
	}
}

func TestDeploymentWithUnsupportedRuntime(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// It should return an error if some
dependencies are given but the runtime is not supported
	f7 := getDefaultFunc("func7", ns)
	f7.Spec.Deps = "deps"
	f7.Spec.Runtime = "cobol"
	err := EnsureFuncDeployment(clientset, f7, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err == nil {
		t.Fatal("An error should be thrown")
	}
}

// TestDeploymentWithTimeout checks that Spec.Timeout is propagated to the
// FUNC_TIMEOUT env var of the function container.
func TestDeploymentWithTimeout(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// If a timeout is specified it should set an environment variable FUNC_TIMEOUT
	f8 := getDefaultFunc(funcName, ns)
	f8.Spec.Timeout = "10"
	err := EnsureFuncDeployment(clientset, f8, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if getEnvValueFromList("FUNC_TIMEOUT", dpm.Spec.Template.Spec.Containers[0].Env) != "10" {
		t.Error("Unable to set timeout")
	}
}

// TestDeploymentWithPrebuiltImage checks that a prebuilt image is used as-is
// and no build init containers are added.
func TestDeploymentWithPrebuiltImage(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// If a prebuilt image is specified it should not build the function using init containers
	f9 := getDefaultFunc(funcName, ns)
	err := EnsureFuncDeployment(clientset, f9, or, lr, "user/image:test", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if dpm.Spec.Template.Spec.Containers[0].Image != "user/image:test" {
		t.Errorf("Unexpected image %s, expecting prebuilt user/image:test", dpm.Spec.Template.Spec.Containers[0].Image)
	}
	if len(dpm.Spec.Template.Spec.InitContainers) != 0 {
		t.Error("Unexpected init containers")
	}
}

// TestDeploymentWithVolumes checks that user-defined volumes and mounts on the
// function deployment are preserved.
func TestDeploymentWithVolumes(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// It should include existing volumes
	f10 := getDefaultFunc(funcName, ns)
	f10.Spec.Deployment.Spec.Template.Spec.Volumes = []v1.Volume{
		{
			Name:         "test",
			VolumeSource: v1.VolumeSource{},
		},
	}
	f10.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
		{
			Name:      "test",
			MountPath: "/tmp/test",
		},
	}
	err := EnsureFuncDeployment(clientset, f10, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if dpm.Spec.Template.Spec.Volumes[0].Name != "test" {
		t.Error("Should maintain volumen test")
	}
	if dpm.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name != "test" {
		t.Error("Should maintain volumen test")
	}
}

// TestEnsureDeploymentWithAffinityOverridden checks that a user-supplied
// (empty) Affinity is kept and the default pod anti-affinity is NOT injected.
func TestEnsureDeploymentWithAffinityOverridden(t *testing.T) {
	funcName := "func"
	clientset, or, ns, lr := prepareDeploymentTest(funcName)
	// If the Image has been already provided it should not resolve it
	f3 := getDefaultFunc(funcName, ns)
	f3.Spec.Deployment.Spec.Template.Spec.Affinity = &v1.Affinity{}
	err := EnsureFuncDeployment(clientset, f3, or, lr, "", "unzip", []v1.LocalObjectReference{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	dpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedAffinity := &v1.Affinity{NodeAffinity: nil, PodAffinity: nil, PodAntiAffinity: nil}
	if *dpm.Spec.Template.Spec.Affinity != *expectedAffinity {
		t.Errorf(
			"Unexpected Affinity Definition:\nExpecting: %+v\nReceived: %+v",
			expectedAffinity,
			dpm.Spec.Template.Spec.Affinity,
		)
	}
}

// doesNotContain reports whether env is absent from envs.
func doesNotContain(envs []v1.EnvVar, env v1.EnvVar) bool {
	for _, e := range envs {
		if e == env {
			return false
		}
	}
	return true
}

// TestGetProvisionContainer exercises the generated "prepare" init container
// command line for every content type: text, base64, zip, compressed tar,
// url and their combinations.
func TestGetProvisionContainer(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	langruntime.AddFakeConfig(clientset)
	lr := langruntime.SetupLangRuntime(clientset)
	lr.ReadConfigMap()
	rvol := v1.VolumeMount{Name: "runtime", MountPath: "/runtime"}
	dvol := v1.VolumeMount{Name: "deps", MountPath: "/deps"}
	resources := v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse("100m")}}
	c, err := getProvisionContainer("test", "sha256:abc1234", "test.func", "test.foo", "text", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	expectedContainer := v1.Container{
		Name:            "prepare",
		Image:           "unzip",
		Command:         []string{"sh", "-c"},
		Args:            []string{"echo 'abc1234 /deps/test.func' > /tmp/func.sha256 && sha256sum -c /tmp/func.sha256 && cp /deps/test.func /runtime/test.py && cp /deps/requirements.txt /runtime"},
		VolumeMounts:    []v1.VolumeMount{rvol, dvol},
		ImagePullPolicy: v1.PullIfNotPresent,
		Resources:       v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse("100m")}},
	}
	if !reflect.DeepEqual(expectedContainer, c) {
		t.Errorf("Unexpected result:\n %+v", c)
	}
	// If the content type is encoded it should decode it
	c, err = getProvisionContainer("Zm9vYmFyCg==", "sha256:abc1234", "test.func", "test.foo", "base64", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "base64 -d < /deps/test.func > /tmp/func.decoded") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	secrets, err := lr.GetImageSecrets("python2.7")
	if err != nil {
		t.Errorf("Unable to fetch secrets: %v", err)
	}
	// NOTE(review): this uses `&&`, so it only fails when BOTH secrets are
	// wrong — likely intended to be `||`; confirm against AddFakeConfig.
	if secrets[0].Name != "p1" && secrets[1].Name != "p2" {
		t.Errorf("Expected first secret to be 'p1' but found %v and second secret to be 'p2' but found %v", secrets[0], secrets[1])
	}
	// It should skip the dependencies installation if the runtime is not supported
	c, err = getProvisionContainer("function", "sha256:abc1234", "test.func", "test.foo", "text", "cobol", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if strings.Contains(c.Args[0], "cp /deps ") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// It should extract the file in case it is a Zip
	c, err = getProvisionContainer("Zm9vYmFyCg==", "sha256:abc1234", "test.zip", "test.foo", "base64+zip", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "base64 -d < /deps/test.zip > /tmp/func.decoded") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	if !strings.Contains(c.Args[0], "unzip -o /tmp/func.decoded -d /runtime") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// It should extract the compressed tar file
	c, err = getProvisionContainer("Zm9vYmFyCg==", "sha256:abc1234", "test.tar.gz", "test.foo", "base64+compressedtar", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "base64 -d < /deps/test.tar.gz > /tmp/func.decoded") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	if !strings.Contains(c.Args[0], "tar xf /tmp/func.decoded -C /runtime") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// If the content type is url it should use curl
	c, err = getProvisionContainer("https://raw.githubusercontent.com/test/test/test/test.py", "sha256:abc1234", "", "test.foo", "url", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "curl 'https://raw.githubusercontent.com/test/test/test/test.py' -L --silent --output /tmp/func.fromurl") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// If the content type is url+zip it should use curl and unzip
	c, err = getProvisionContainer("https://raw.githubusercontent.com/test/test/test/test.zip", "sha256:abc1234", "", "test.foo", "url+zip", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "curl 'https://raw.githubusercontent.com/test/test/test/test.zip' -L --silent --output /tmp/func.fromurl") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	if !strings.Contains(c.Args[0], "unzip -o /tmp/func.fromurl -d /runtime") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// If the content type is url+compressedtar it should use curl and tar
	c, err = getProvisionContainer("https://raw.githubusercontent.com/test/test/test/test.tar.gz", "sha256:abc1234", "", "test.foo", "url+compressedtar", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "curl 'https://raw.githubusercontent.com/test/test/test/test.tar.gz' -L --silent --output /tmp/func.fromurl") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	if !strings.Contains(c.Args[0], "tar xf /tmp/func.fromurl -C /runtime") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// if the function use bundled deps in remote zip file
	c, err = getProvisionContainer("https://raw.githubusercontent.com/test/test/test/test.zip", "sha256:abc1234", "", "test.foo", "url+zip+deps", "python2.7", "unzip", rvol, dvol, resources, lr)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	if !strings.HasPrefix(c.Args[0], "curl 'https://raw.githubusercontent.com/test/test/test/test.zip' -L --silent --output /tmp/func.fromurl") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	if !strings.Contains(c.Args[0], "unzip -o /tmp/func.fromurl -d /runtime") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
	// use bundled deps will not copy the requirements.txt to /runtime
	if strings.Contains(c.Args[0], "cp /deps/requirements.txt /runtime") {
		t.Errorf("Unexpected command: %s", c.Args[0])
	}
}

================================================
FILE: pkg/utils/metrics.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"bytes"

	"k8s.io/client-go/kubernetes"

	"github.com/prometheus/common/expfmt"
)

// Metric contains Prometheus metrics for a single function/HTTP-method pair.
type Metric struct {
	FunctionName         string  `json:"function,omitempty"`
	Namespace            string  `json:"namespace,omitempty"`
	Method               string  `json:"method,omitempty"`
	Message              string  `json:"message,omitempty"`
	TotalCalls           float64 `json:"total_calls,omitempty"`
	TotalFailures        float64 `json:"total_failures,omitempty"`
	TotalDurationSeconds float64 `json:"total_duration_seconds,omitempty"`
	AvgDurationSeconds   float64 `json:"avg_duration_seconds,omitempty"`
}

// MetricsRetriever is an interface for retrieving metrics from an endpoint
type MetricsRetriever interface {
	GetRawMetrics(kubernetes.Interface, string, string) ([]byte, error)
}

// PrometheusMetricsHandler is a handler for retrieving metrics from Prometheus
type PrometheusMetricsHandler struct{}

// parseMetrics parses a Prometheus text-format payload and aggregates the
// "function_duration_seconds", "function_calls_total" and
// "function_failures_total" families into one Metric per HTTP method.
// If the payload contains none of those families, a single placeholder
// Metric is returned so the function still shows up in the output.
func parseMetrics(namespace, functionName string, rawMetrics []byte) ([]*Metric, error) {
	parser := expfmt.TextParser{}
	parsedData, err := parser.TextToMetricFamilies(bytes.NewReader(rawMetrics))
	if err != nil {
		return nil, err
	}
	// tmp is keyed by HTTP method label value
	tmp := map[string]*Metric{}
	var parsedMetrics []*Metric
	metricsOfInterest := []string{"function_duration_seconds", "function_calls_total", "function_failures_total"}
	for _, m := range metricsOfInterest {
		for _, metric := range parsedData[m].GetMetric() {
			// a function can have metrics for multiple methods (GET, POST, etc.)
			// method names can be values other than GET/POST/PUT/DELETE
			for _, label := range metric.GetLabel() {
				if label.GetName() == "method" {
					if _, ok := tmp[label.GetValue()]; !ok {
						tmp[label.GetValue()] = &Metric{
							FunctionName: functionName,
							Namespace:    namespace,
							Method:       label.GetValue(),
						}
					}
					if m == "function_failures_total" {
						tmp[label.GetValue()].TotalFailures = metric.GetCounter().GetValue()
					}
					if m == "function_duration_seconds" {
						tmp[label.GetValue()].TotalDurationSeconds = metric.GetHistogram().GetSampleSum()
					}
					if m == "function_calls_total" {
						tmp[label.GetValue()].TotalCalls = metric.GetCounter().GetValue()
						// average is only meaningful when there has been at least one call;
						// NOTE: relies on "function_duration_seconds" being processed first
						// (it precedes "function_calls_total" in metricsOfInterest)
						if tmp[label.GetValue()].TotalCalls > 0 {
							tmp[label.GetValue()].AvgDurationSeconds = float64(tmp[label.GetValue()].TotalDurationSeconds) / tmp[label.GetValue()].TotalCalls
						}
					}
				}
			}
		}
	}
	// if the function hasn't been invoked, add an item to the list so the function displays in the output
	if len(tmp) == 0 {
		tmp[""] = &Metric{
			FunctionName: functionName,
			Namespace:    namespace,
		}
	}
	for _, v := range tmp {
		parsedMetrics = append(parsedMetrics, v)
	}
	return parsedMetrics, nil
}

// GetRawMetrics returns the raw metrics for a Prometheus endpoint by
// proxying a GET of /metrics through the function's Kubernetes service.
func (h *PrometheusMetricsHandler) GetRawMetrics(apiV1Client kubernetes.Interface, namespace, functionName string) ([]byte, error) {
	port, err := GetFunctionPort(apiV1Client, namespace, functionName)
	if err != nil {
		return []byte{}, err
	}
	req := apiV1Client.CoreV1().RESTClient().Get().Namespace(namespace).Resource("services").SubResource("proxy").Name(functionName + ":" + port).Suffix("/metrics")
	return req.Do().Raw()
}

// GetFunctionMetrics returns Prometheus metrics as a slice of *Metric.
// Retrieval or parse failures are not propagated as errors; instead a
// single placeholder Metric carrying an explanatory Message is returned.
func GetFunctionMetrics(apiV1Client kubernetes.Interface, h MetricsRetriever, namespace, functionName string) []*Metric {
	res, err := h.GetRawMetrics(apiV1Client, namespace, functionName)
	if err != nil {
		return []*Metric{
			{
				FunctionName: functionName,
				Namespace:    namespace,
				Message:      "Function does not expose metrics",
			},
		}
	}
	metrics, err := parseMetrics(namespace, functionName, res)
	if err != nil {
		return []*Metric{
			{
				FunctionName: functionName,
				Namespace:    namespace,
				Message:      "Unable to get function metrics",
			},
		}
	}
	return metrics
}

================================================
FILE: pkg/version/version.go
================================================
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package version

var (
	// Version will be set automatically by the build system via -ldflags
	Version string
)

================================================
FILE: script/.validate
================================================
#!/bin/bash
# Copyright (c) 2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ -z "$VALIDATE_UPSTREAM" ]; then # this is kind of an expensive check, so let's not do this twice if we # are running more than one validate bundlescript VALIDATE_REPO='https://github.com/kubeless/kubeless.git' VALIDATE_BRANCH='master' if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" VALIDATE_BRANCH="${TRAVIS_BRANCH}" fi VALIDATE_HEAD="$(git rev-parse --verify HEAD)" git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" validate_diff() { if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then git diff "$VALIDATE_COMMIT_DIFF" "$@" fi } validate_log() { if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then git log "$VALIDATE_COMMIT_LOG" "$@" fi } fi ================================================ FILE: script/binary ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -e GIT_COMMIT=$(git describe --tags --dirty) BUILD_FLAGS=(-ldflags="-w -X github.com/kubeless/kubeless/pkg/version.Version=${GIT_COMMIT}") # Get rid of existing binary echo "Removing Old Kubeless binaries" rm -f ${GOPATH%%:*}/bin/kubeless rm -f ${GOPATH%%:*}/bin/function-controller echo "Build Kubeless Components binaries" # Build binary go install \ "${BUILD_FLAGS[@]}" \ ./cmd/... if [ $? -eq 0 ]; then echo "Build Kubeless Components successful. Program saved at ${GOPATH%%:*}/bin" else echo "Build Kubeless Components failed." fi ================================================ FILE: script/binary-cli ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -e OS_PLATFORM_ARG=(-os="darwin linux windows") OS_ARCH_ARG=(-arch="amd64") GIT_COMMIT=$(git describe --tags --dirty) BUILD_DATE=$(date) BUILD_FLAGS=(-ldflags="-w -X github.com/kubeless/kubeless/pkg/version.Version=${GIT_COMMIT}") # Get rid of existing binaries rm -rf bundles/kubeless* # Build kubeless gox "${OS_PLATFORM_ARG[@]}" "${OS_ARCH_ARG[@]}" \ -output="bundles/kubeless_{{.OS}}-{{.Arch}}/kubeless" \ "${BUILD_FLAGS[@]}" \ ./cmd/kubeless ================================================ FILE: script/binary-controller ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -e if [ -z "$1" ]; then # TODO: Skip windows at this moment OS_PLATFORM_ARG=(-os="linux") else OS_PLATFORM_ARG=($1) fi if [ -z "$2" ]; then OS_ARCH_ARG=(-arch="amd64") else OS_ARCH_ARG=($2) fi if [ -z "$3" ]; then TARGET="kubeless-function-controller" else TARGET=($3) fi if [ -z "$4" ]; then PKG="./cmd/function-controller" else PKG=($4) fi GIT_COMMIT=$(git describe --tags --dirty) BUILD_FLAGS=(-ldflags="-w -X github.com/kubeless/kubeless/pkg/version.Version=${GIT_COMMIT}") # Get rid of existing binaries rm -rf bundles/kubeless* # Build kubeless-controller gox "${OS_PLATFORM_ARG[@]}" "${OS_ARCH_ARG[@]}" \ -output="bundles/kubeless_{{.OS}}-{{.Arch}}/$TARGET" \ "${BUILD_FLAGS[@]}" \ "$PKG" ================================================ FILE: script/cluster-up-minikube.sh ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# From minikube howto
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=$HOME
export CHANGE_MINIKUBE_NONE_USER=true
mkdir -p ~/.kube
touch ~/.kube/config

export KUBECONFIG=$HOME/.kube/config
export PATH=${PATH}:${GOPATH:?}/bin

# MINIKUBE_VERSION must be provided by the environment
MINIKUBE_VERSION=${MINIKUBE_VERSION:?}

# Install a binary system-wide, falling back to $GOPATH/bin when sudo fails
install_bin() {
    local exe=${1:?}
    sudo install -v ${exe} /usr/local/bin || install ${exe} ${GOPATH:?}/bin
}

# Travis ubuntu trusty env doesn't have nsenter, needed for VM-less minikube
# (--vm-driver=none, runs dockerized)
check_or_build_nsenter() {
    which nsenter >/dev/null && return 0
    echo "INFO: Getting 'nsenter' ..."
    curl -LO http://mirrors.kernel.org/ubuntu/pool/main/u/util-linux/util-linux_2.30.1-0ubuntu4_amd64.deb
    dpkg -x ./util-linux_2.30.1-0ubuntu4_amd64.deb /tmp/out
    install_bin /tmp/out/usr/bin/nsenter
}
# Download and install minikube if not already on PATH
check_or_install_minikube() {
    which minikube || {
        wget -q --no-clobber -O minikube \
            https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
        install_bin ./minikube
    }
}

# Install nsenter if missing
check_or_build_nsenter
# Install minikube if missing
check_or_install_minikube
MINIKUBE_BIN=$(which minikube)

# Start minikube
sudo -E ${MINIKUBE_BIN} start --vm-driver=none \
    --extra-config=kubelet.cgroup-driver=cgroupfs \
    --memory 4096

# Wait til settles
echo "INFO: Waiting for minikube cluster to be ready ..."
typeset -i cnt=120
until kubectl --context=minikube get pods >& /dev/null; do
    ((cnt=cnt-1)) || exit 1
    sleep 1
done

sudo -E ${MINIKUBE_BIN} update-context

# Enable Nginx Ingress
echo "INFO: Enabling ingress addon to minikube..."
sudo -E ${MINIKUBE_BIN} addons enable ingress

sudo -E ${MINIKUBE_BIN} config set WantUpdateNotification false

# Give some time for the cluster to become healthy
sleep 10

exit 0
# vim: sw=4 ts=4 et si

================================================
FILE: script/create_release.sh
================================================
#!/bin/bash
set -e

# Create a GitHub release for $TAG and upload the given manifests plus any
# built CLI bundles as release assets.
# Requires: $ACCESS_TOKEN (GitHub token), release_utils.sh next to this script.
REPO_NAME=kubeless
REPO_DOMAIN=kubeless

TAG=${1:?}
MANIFESTS=${2:?} # Space separated list of manifests to publish
PROJECT_DIR=$(cd $(dirname $0)/.. && pwd)

source $(dirname $0)/release_utils.sh

if [[ -z "$REPO_NAME" || -z "$REPO_DOMAIN" ]]; then
    echo "Github repository not specified" > /dev/stderr
    exit 1
fi

if [[ -z "$ACCESS_TOKEN" ]]; then
    echo "Unable to release: Github Token not specified" > /dev/stderr
    exit 1
fi

# Verify the repository exists before trying to publish anything
repo_check=`curl -H "Authorization: token $ACCESS_TOKEN" -s https://api.github.com/repos/$REPO_DOMAIN/$REPO_NAME`
if [[ $repo_check == *"Not Found"* ]]; then
    echo "Not found a Github repository for $REPO_DOMAIN/$REPO_NAME, it is not possible to publish it" > /dev/stderr
    exit 1
else
    RELEASE_ID=$(release_tag $TAG $REPO_DOMAIN $REPO_NAME | jq '.id')
fi

# Upload each manifest under a tag-suffixed name
IFS=' ' read -r -a manifests <<< "$MANIFESTS"
for f in "${manifests[@]}"; do
    cp ${PROJECT_DIR}/${f}.yaml ${PROJECT_DIR}/${f}-${TAG}.yaml
    upload_asset $REPO_DOMAIN $REPO_NAME "$RELEASE_ID" "${PROJECT_DIR}/${f}-${TAG}.yaml"
done
# Upload the pre-built CLI bundles
for f in `ls ${PROJECT_DIR}/bundles/kubeless_*.zip`; do
    upload_asset $REPO_DOMAIN $REPO_NAME $RELEASE_ID $f
done

================================================
FILE: script/enable-gcloud.sh
================================================
#!/bin/bash
set -e

# Install (if needed) and configure the Google Cloud SDK for CI runs.
# $1: build directory where the service-account key file is written.
# Requires: $GCLOUD_KEY (service-account JSON), $GKE_PROJECT (project id).
BUILD_DIR=${1:?}

export GOOGLE_APPLICATION_CREDENTIALS=$BUILD_DIR/client_secrets.json
echo $GCLOUD_KEY > $GOOGLE_APPLICATION_CREDENTIALS

if [ ! -d $HOME/gcloud/google-cloud-sdk ]; then
    mkdir -p $HOME/gcloud &&
    wget -q https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-187.0.0-linux-x86_64.tar.gz --directory-prefix=$HOME/gcloud &&
    cd $HOME/gcloud &&
    tar xzf google-cloud-sdk-187.0.0-linux-x86_64.tar.gz &&
    printf '\ny\n\ny\ny\n' | ./google-cloud-sdk/install.sh &&
    sudo ln -s $HOME/gcloud/google-cloud-sdk/bin/gcloud /usr/local/bin/gcloud
    cd $BUILD_DIR;
fi

gcloud -q config set project $GKE_PROJECT
if [ -a $GOOGLE_APPLICATION_CREDENTIALS ]; then
    gcloud -q auth activate-service-account --key-file $GOOGLE_APPLICATION_CREDENTIALS;
fi

================================================
FILE: script/find_digest.sh
================================================
#!/usr/bin/env bash
# Copyright (c) 2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
REPOSITORY=$1 TARGET_TAG=$2 # get authorization token TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$REPOSITORY:pull" | jq -r .token) # find all tags ALL_TAGS=$(curl -s -H "Authorization: Bearer $TOKEN" https://index.docker.io/v2/$REPOSITORY/tags/list | jq -r .tags[]) # get image digest for target TARGET_DIGEST=$(curl -s -D - -H "Authorization: Bearer $TOKEN" -H "Accept: application/vnd.docker.distribution.manifest.v2+json" https://index.docker.io/v2/$REPOSITORY/manifests/$TARGET_TAG | grep Docker-Content-Digest | cut -d ' ' -f 2) # for each tags for tag in ${ALL_TAGS[@]}; do # get image digest digest=$(curl -s -D - -H "Authorization: Bearer $TOKEN" -H "Accept: application/vnd.docker.distribution.manifest.v2+json" https://index.docker.io/v2/$REPOSITORY/manifests/$tag | grep Docker-Content-Digest | cut -d ' ' -f 2) # check digest if [[ $TARGET_DIGEST = $digest ]]; then echo "$tag $digest" fi done ================================================ FILE: script/integration-tests ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Special case: if ./ksonnet-lib exists, set KUBECFG_JPATH
test -d $PWD/ksonnet-lib && export KUBECFG_JPATH=$PWD/ksonnet-lib

# We require below env
: ${GOPATH:?} ${KUBECFG_JPATH:?}
export PATH=${PATH}:${GOPATH}/bin

# Default kubernetes context - if it's "dind" or "minikube" will
# try to bring up a local (dockerized) cluster
test -n "${TRAVIS_K8S_CONTEXT}" && set -- ${TRAVIS_K8S_CONTEXT}

# minikube seems to be more stable than dind, sp for kafka
INTEGRATION_TESTS_CTX=${1:-minikube}
# Which bats suite(s) to run; "default" runs the full set
INTEGRATION_TESTS_TARGET=${2:-default}

# Check for some needed tools, install (some) if missing
which bats > /dev/null || {
    echo "ERROR: 'bats' is required to run these tests," \
        "install it from https://github.com/sstephenson/bats"
    exit 255
}

# Start a k8s cluster (minikube, dind) if not running
kubectl get nodes --context=${INTEGRATION_TESTS_CTX:?} || {
    cluster_up=./script/cluster-up-${INTEGRATION_TESTS_CTX}.sh
    test -f ${cluster_up} || {
        echo "FATAL: bringing up k8s cluster '${INTEGRATION_TESTS_CTX}' not supported"
        exit 255
    }
    ${cluster_up}
}

# Both RBAC'd dind and minikube seem to be missing rules to make kube-dns work properly
# add some (granted) broad ones:
kubectl --context=${INTEGRATION_TESTS_CTX:?} get clusterrolebinding kube-dns-admin >& /dev/null || \
    kubectl --context=${INTEGRATION_TESTS_CTX:?} create clusterrolebinding kube-dns-admin --serviceaccount=kube-system:default --clusterrole=cluster-admin

# Prep: load test library, save current k8s default context (and restore it at exit),
# as kubeless doesn't support --context
export TEST_CONTEXT=${INTEGRATION_TESTS_CTX}
source script/libtest.bash
trap k8s_context_restore 0
k8s_context_save

# Run the tests thru bats:
kubectl create namespace kubeless
case $INTEGRATION_TESTS_TARGET in
    deployment)
        bats tests/deployment-tests.bats ;;
    basic)
        bats tests/integration-tests.bats ;;
    http)
        bats tests/integration-tests-http.bats ;;
    cronjob)
        bats tests/integration-tests-cronjob.bats ;;
    prebuilt_functions)
        bats tests/integration-tests-prebuilt.bats ;;
    *)
        bats tests/deployment-tests.bats && \
        bats tests/integration-tests.bats && \
        bats tests/integration-tests-http.bats && \
        bats tests/integration-tests-cronjob.bats ;;
esac
exit_code=$?

# Just showing remaining k8s objects
kubectl get all --all-namespaces

# On failure (or when TRAVIS_DUMP_LOGS is set) dump pod logs for debugging
if [ ${exit_code} -ne 0 -o -n "${TRAVIS_DUMP_LOGS}" ]; then
    echo "INFO: Build ERRORed, dumping logs: ##"
    for ns in kubeless default; do
        echo "### LOGs: namespace: ${ns} ###"
        kubectl get pod -n ${ns} -oname|xargs -I@ sh -xc "kubectl logs -n ${ns} @|sed 's|^|@: |'"
    done
    echo "INFO: Description"
    kubectl describe pod -l created-by=kubeless
    echo "INFO: LOGs: pod: kube-dns ###"
    kubectl logs -n kube-system -l k8s-app=kube-dns -c kubedns
    echo "INFO: LOGs: END"
fi

[ ${exit_code} -eq 0 ] && echo "INFO: $0: SUCCESS" || echo "ERROR: $0: FAILED"
exit ${exit_code}
# vim: sw=4 ts=4 et si

================================================
FILE: script/libtest.bash
================================================
# Copyright (c) 2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# k8s and kubeless helpers, specially "wait"-ers on pod ready/deleted/etc KUBELESS_MANIFEST=kubeless-non-rbac.yaml KUBELESS_MANIFEST_RBAC=kubeless.yaml KAFKA_MANIFEST=kafka-zookeeper.yaml NATS_MANIFEST=nats.yaml KINESIS_MANIFEST=kinesis.yaml KUBECTL_BIN=$(which kubectl) : ${KUBECTL_BIN:?ERROR: missing binary: kubectl} export TEST_MAX_WAIT_SEC=300 # Workaround 'bats' lack of forced output support, dup() stderr fd exec 9>&2 echo_info() { test -z "$TEST_DEBUG" && return 0 echo "INFO: $*" >&9 } export -f echo_info kubectl() { ${KUBECTL_BIN:?} --context=${TEST_CONTEXT:?} "$@" } ## k8s specific Helper functions k8s_wait_for_pod_ready() { echo_info "Waiting for pod '${@}' to be ready ... " local -i cnt=${TEST_MAX_WAIT_SEC:?} # Retries just in case it is not stable local -i successCount=0 while [ "$successCount" -lt "3" ]; do if kubectl get pod "${@}" | grep -q Running; then ((successCount=successCount+1)) fi ((cnt=cnt-1)) || return 1 sleep 1 done } k8s_wait_for_pod_count() { local pod_cnt=${1:?}; shift echo_info "Waiting for pod '${@}' to have count==${pod_cnt} running ... " local -i cnt=${TEST_MAX_WAIT_SEC:?} # Retries just in case it is not stable local -i successCount=0 while [ "$successCount" -lt "3" ]; do if [[ $(kubectl get pod "${@}" -ogo-template='{{.items|len}}') == ${pod_cnt} ]]; then ((successCount=successCount+1)) fi ((cnt=cnt-1)) || return 1 sleep 1 done k8s_wait_for_pod_ready "${@}" echo "Finished waiting" } k8s_wait_for_uniq_pod() { k8s_wait_for_pod_count 1 "$@" } k8s_wait_for_pod_gone() { echo_info "Waiting for pod '${@}' to be gone ... " local -i cnt=${TEST_MAX_WAIT_SEC:?} until kubectl get pod "${@}" | grep -q No.resources.found; do ((cnt=cnt-1)) || return 1 sleep 1 done } k8s_wait_for_pod_logline() { local string="${1:?}"; shift local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for '${@}' to show logline '${string}' ..." 
until kubectl logs "${@}"| grep -q "${string}"; do ((cnt=cnt-1)) || return 1 sleep 1 done } k8s_wait_for_cluster_ready() { echo_info "Waiting for k8s cluster to be ready (context=${TEST_CONTEXT}) ..." _wait_for_cmd_ok kubectl get po 2>/dev/null && \ k8s_wait_for_pod_ready -n kube-system -l component=kube-addon-manager && \ k8s_wait_for_pod_ready -n kube-system -l k8s-app=kube-dns && \ return 0 return 1 } k8s_log_all_pods() { local namespaces=${*:?} ns for ns in ${*}; do echo "### namespace: ${ns} ###" kubectl get pod -n ${ns} -oname|xargs -I@ sh -xc "kubectl logs -n ${ns} @|sed 's|^|@: |'" done } k8s_context_save() { TEST_CONTEXT_SAVED=$(${KUBECTL_BIN} config current-context) # Kubeless doesn't support contexts yet, save+restore it # Don't save current_context if it's the same already [[ $TEST_CONTEXT_SAVED == $TEST_CONTEXT ]] && TEST_CONTEXT_SAVED="" # Save current_context [[ $TEST_CONTEXT_SAVED != "" ]] && \ echo_info "Saved context: '${TEST_CONTEXT_SAVED}'" && \ ${KUBECTL_BIN} config use-context ${TEST_CONTEXT} } k8s_context_restore() { # Restore saved context [[ $TEST_CONTEXT_SAVED != "" ]] && \ echo_info "Restoring context: '${TEST_CONTEXT_SAVED}'" && \ ${KUBECTL_BIN} config use-context ${TEST_CONTEXT_SAVED} } _wait_for_cmd_ok() { local cmd="${*:?}"; shift local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for '${*}' to successfully exit ..." until env ${cmd}; do ((cnt=cnt-1)) || return 1 sleep 1 done } ## Specific for kubeless kubeless_recreate() { local manifest_del=${1:?missing delete manifest} manifest_upd=${2:?missing update manifest} local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Delete kubeless namespace, wait to be gone ... 
" kubectl delete -f ${manifest_del} || true kubectl delete namespace kubeless >& /dev/null || true while kubectl get namespace kubeless >& /dev/null; do ((cnt=cnt-1)) || return 1 sleep 1 done kubectl create namespace kubeless kubectl create -f ${manifest_upd} } kubeless_function_delete() { local func=${1:?}; shift echo_info "Deleting function "${func}" in case still present ... " kubeless function ls |grep -w "${func}" && kubeless function delete "${func}" >& /dev/null || true echo_info "Wait for function "${func}" to be deleted " local -i cnt=${TEST_MAX_WAIT_SEC:?} while kubectl get functions "${func}" >& /dev/null; do ((cnt=cnt-1)) || return 1 sleep 1 done } kubeless_kafka_trigger_delete() { local trigger=${1:?}; shift echo_info "Deleting kafka trigger "${trigger}" in case still present ... " kubeless trigger kafka list |grep -w "${trigger}" && kubeless trigger kafka delete "${trigger}" >& /dev/null || true } kubeless_nats_trigger_delete() { local trigger=${1:?}; shift echo_info "Deleting NATS trigger "${trigger}" in case still present ... " kubeless trigger nats list |grep -w "${trigger}" && kubeless trigger nats delete "${trigger}" >& /dev/null || true } kubeless_function_deploy() { local func=${1:?}; shift echo_info "Deploying function ..." kubeless function deploy ${func} ${@} } _wait_for_kubeless_controller_ready() { echo_info "Waiting for kubeless controller to be ready ... " k8s_wait_for_pod_ready -n kubeless -l kubeless=controller _wait_for_cmd_ok kubectl get functions 2>/dev/null } _wait_for_kubeless_controller_logline() { local string="${1:?}" k8s_wait_for_pod_logline "${string}" -n kubeless -l kubeless=controller -c kubeless-function-controller } wait_for_ingress() { echo_info "Waiting until Nginx pod is ready ..." 
local -i cnt=${TEST_MAX_WAIT_SEC:?} until kubectl get pods -l name=nginx-ingress-controller -n kube-system>& /dev/null; do ((cnt=cnt-1)) || exit 1 sleep 1 done } wait_for_kubeless_kafka_server_ready() { [[ $(kubectl get pod -n kubeless kafka-0 -ojsonpath='{.metadata.annotations.ready}') == true ]] && return 0 echo_info "Waiting for kafka-0 to be ready ..." k8s_wait_for_pod_logline "Kafka.*Server.*started" -n kubeless kafka-0 echo_info "Waiting for kafka-trigger-controller pod to be ready ..." k8s_wait_for_pod_ready -n kubeless -l kubeless=kafka-trigger-controller _wait_for_cmd_ok kubectl get kafkatriggers 2>/dev/null kubectl annotate pods --overwrite -n kubeless kafka-0 ready=true } wait_for_kubeless_nats_operator_ready() { echo_info "Waiting for NATS operator pod to be ready ..." k8s_wait_for_pod_ready -n nats-io -l name=nats-operator } wait_for_kubeless_nats_cluster_ready() { echo_info "Waiting for NATS cluster pods to be ready ..." k8s_wait_for_pod_ready -n nats-io -l nats_cluster=nats } wait_for_kubeless_nats_controller_ready() { echo_info "Waiting for NATS controller pods to be ready ..." k8s_wait_for_pod_ready -n kubeless -l kubeless=nats-trigger-controller } _wait_for_kubeless_kafka_topic_ready() { local topic=${1:?} local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for kafka-0 topic='${topic}' to be ready ..." 
# zomg enter kafka-0 container to peek for topic already present until \ kubectl exec -n kubeless kafka-0 -- sh -c \ '/opt/bitnami/kafka/bin/kafka-topics.sh --list --zookeeper $( sed -n s/zookeeper.connect=//p /bitnami/kafka/conf/server.properties)'| \ grep -qw ${topic} do ((cnt=cnt-1)) || return 1 sleep 1 done } _wait_for_simple_function_pod_ready() { k8s_wait_for_pod_ready -l function=get-python } _deploy_simple_function() { make -C examples get-python } _call_simple_function() { # Artifact to dodge 'bats' lack of support for positively testing _for_ errors case "${1:?}" in 1) make -C examples get-python-verify | egrep Error.1;; 0) make -C examples get-python-verify;; esac } _delete_simple_function() { kubeless_function_delete get-python } ## Entry points used by 'bats' tests: verify_k8s_tools() { local tools="kubectl kubecfg kubeless" for exe in $tools; do which ${exe} >/dev/null && continue echo "ERROR: '${exe}' needs to be installed" return 1 done } verify_rbac_mode() { kubectl api-versions | grep -q rbac && return 0 echo "ERROR: Please run w/RBAC, eg minikube as: minikube start --extra-config=apiserver.Authorization.Mode=RBAC" return 1 } wait_for_endpoint() { local func=${1:?} local -i cnt=${TEST_MAX_WAIT_SEC:?} local endpoint=$(kubectl get endpoints -l function=$func | grep $func | awk '{print $2}') echo_info "Waiting for the endpoint ${endpoint}' to be ready ..." until curl -s $endpoint; do ((cnt=cnt-1)) || return 1 sleep 1 done } wait_for_autoscale() { local func=${1:?} local -i cnt=${TEST_MAX_WAIT_SEC:?} local hap=$() echo_info "Waiting for HAP ${func} to be ready ..." until kubectl get horizontalpodautoscalers | grep $func; do ((cnt=cnt-1)) || return 1 sleep 1 done } wait_for_job() { local func=${1:?} local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for build job of ${func} to be finished ..." 
until kubectl get job -l function=${func} -o yaml | grep "succeeded: 1"; do ((cnt=cnt-1)) || return 1 sleep 1 done } test_must_fail_without_rbac_roles() { echo_info "RBAC TEST: function deploy/call must fail without RBAC roles" _delete_simple_function kubeless_recreate $KUBELESS_MANIFEST_RBAC $KUBELESS_MANIFEST _wait_for_kubeless_controller_logline "User.*cannot" } redeploy_with_rbac_roles() { kubeless_recreate $KUBELESS_MANIFEST_RBAC $KUBELESS_MANIFEST_RBAC _wait_for_kubeless_controller_ready _wait_for_kubeless_controller_logline "controller synced and ready" } deploy_kafka() { echo_info "Deploy kafka ... " kubectl create -f $KAFKA_MANIFEST } deploy_nats_operator() { echo_info "Deploy NATS operator ... " kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml } deploy_nats_cluster() { echo_info "Deploy NATS cluster ... " kubectl apply -f ./manifests/nats/nats-cluster.yaml -n nats-io } deploy_nats_trigger_controller() { echo_info "Deploy NATS trigger controller ... " kubectl create -f $NATS_MANIFEST } expose_nats_service() { kubectl get svc nats -n nats-io -o yaml | sed 's/ClusterIP/NodePort/' | kubectl replace -f - } deploy_kinesis_trigger_controller() { echo_info "Deploy Kinesis trigger controller ... " kubectl create -f $KINESIS_MANIFEST } wait_for_kubeless_kinesis_controller_ready() { echo_info "Waiting for Kinesis trigger controller pods to be ready ..." k8s_wait_for_pod_ready -n kubeless -l kubeless=kinesis-trigger-controller } deploy_kinesalite() { echo_info "Deploy Kinesalite a AWS Kinesis mock server ... " kubectl apply -f ./manifests/kinesis/kinesalite.yaml } wait_for_kinesalite_pod() { echo_info "Waiting for Kinesalite pod to be ready ..." 
k8s_wait_for_pod_ready -l app=kinesis } deploy_function() { local func=${1:?} func_topic echo_info "TEST: $func" kubeless_function_delete ${func} make -sC examples ${func} } deploy_kafka_trigger() { local trigger=${1:?} echo_info "TEST: $trigger" kubeless_kafka_trigger_delete ${trigger} make -sC examples ${trigger} } deploy_nats_trigger() { local trigger=${1:?} echo_info "TEST: $trigger" kubeless_nats_trigger_delete ${trigger} make -sC examples ${trigger} } verify_function() { local func=${1:?} local make_task=${2:-${func}-verify} echo_info "Init logs: $(kubectl logs -l function=${func} -c prepare)" k8s_wait_for_pod_ready -l function=${func} case "${func}" in *pubsub*) func_topic=$(kubectl get kafkatrigger "${func}" -o yaml|sed -n 's/topic: //p') echo_info "FUNC TOPIC: $func_topic" esac local -i counter=0 until make -sC examples ${make_task}; do echo_info "FUNC ${func} failed. Retrying..." ((counter=counter+1)) if [ "$counter" -ge 3 ]; then echo_info "FUNC ${func} failed ${counter} times. 
Exiting" return 1; fi sleep `expr 10 \* $counter` done } test_kubeless_function() { local func=${1:?} deploy_function $func verify_function $func } update_function() { local func=${1:?} func_topic echo_info "UPDATE: $func" make -sC examples ${func}-update sleep 10 k8s_wait_for_uniq_pod -l function=${func} } restart_function() { local func=${1:?} echo_info "Restarting: $func" kubectl delete pod -l function=${func} k8s_wait_for_uniq_pod -l function=${func} } test_kubeless_function_update() { local func=${1:?} update_function $func verify_function $func ${func}-update-verify } create_basic_auth_secret() { local secret=${1:?}; shift htpasswd -cb auth foo bar kubectl create secret generic $secret --from-file=auth } create_tls_secret_from_key_cert() { local secret=${1:?}; shift openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com" kubectl create secret tls $secret --key /tmp/tls.key --cert /tmp/tls.crt } create_http_trigger_with_tls_secret(){ local func=${1:?}; shift local domain=${1-""}; local subpath=${2-""}; local secret=${3-""}; delete_http_trigger ${func} echo_info "TEST: Creating HTTP trigger" local command="kubeless trigger http create ing-${func} --function-name ${func}" if [ -n "$domain" ]; then command="$command --hostname ${domain}" fi if [ -n "$subpath" ]; then command="$command --path ${subpath}" fi if [ -n "$secret" ]; then command="$command --tls-secret ${secret}" fi eval $command } create_http_trigger(){ local func=${1:?}; shift local domain=${1-""}; local subpath=${2-""}; local basicauth=${3-""}; local gateway=${4-""}; delete_http_trigger ${func} echo_info "TEST: Creating HTTP trigger" local command="kubeless trigger http create ing-${func} --function-name ${func}" if [ -n "$domain" ]; then command="$command --hostname ${domain}" fi if [ -n "$subpath" ]; then command="$command --path ${subpath}" fi if [ -n "$basicauth" ]; then command="$command --basic-auth-secret ${basicauth}" fi if [ -n 
"$gateway" ]; then command="$command --gateway ${gateway}" fi eval $command } update_http_trigger(){ local func=${1:?}; shift local domain=${1:-""} local subpath=${2:-""}; echo_info "TEST: Updating HTTP trigger" local command="kubeless trigger http update ing-${func} --function-name ${func}" if [ -n "$domain" ]; then command="$command --hostname ${domain}" fi if [ -n "$subpath" ]; then command="$command --path ${subpath}" fi eval $command } verify_http_trigger(){ local func=${1:?}; shift local ip=${1:?}; shift local expected_response=${1:?}; shift local domain=${1:?}; shift local subpath=${1:-""}; kubeless trigger http list | grep ${func} local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for ingress to be ready..." until kubectl get ingress | grep $func | grep "$domain" | awk '{print $3}' | grep "$ip"; do ((cnt=cnt-1)) || return 1 sleep 1 done sleep 3 curl -vv --header "Host: $domain" $ip\/$subpath | grep "${expected_response}" } verify_http_trigger_basic_auth(){ local func=${1:?}; shift local ip=${1:?}; shift local expected_response=${1:?}; shift local domain=${1:?}; shift local subpath=${1:?}; shift local auth=${1:-""}; kubeless trigger http list | grep ${func} local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for ingress to be ready..." until kubectl get ingress | grep $func | grep "$domain" | awk '{print $3}' | grep "$ip"; do ((cnt=cnt-1)) || return 1 sleep 1 done sleep 3 curl -v --header "Host: $domain" $ip\/$subpath | grep "401 Authorization Required" curl -v --header "Host: $domain" -u $auth $ip\/$subpath | grep "${expected_response}" } verify_https_trigger(){ local func=${1:?}; shift local ip=${1:?}; shift local expected_response=${1:?}; shift local domain=${1:?}; shift local subpath=${1:-""}; kubeless trigger http list | grep ${func} local -i cnt=${TEST_MAX_WAIT_SEC:?} echo_info "Waiting for ingress to be ready..." 
    until kubectl get ingress | grep $func | grep "$domain" | awk '{print $3}' | grep "$ip"; do
        ((cnt=cnt-1)) || return 1
        sleep 1
    done
    sleep 3
    curl -k -vv --header "Host: $domain" https:\/\/$ip\/$subpath | grep "${expected_response}"
}
# Delete HTTP trigger "ing-$1" if it exists; never fail
delete_http_trigger() {
    local func=${1:?}; shift
    kubeless trigger http list |grep -w ing-${func} && kubeless trigger http delete ing-${func} >& /dev/null || true
}
# Create a CronJob trigger named $1 for function $1 with cron schedule $2
create_cronjob_trigger(){
    local func=${1:?}; shift
    local schedule=${1:?};
    delete_cronjob_trigger ${func}
    echo_info "TEST: Creating CronJob trigger"
    kubeless trigger cronjob create ${func} --function ${func} --schedule "${schedule}"
}
# Update CronJob trigger $1 with a new cron schedule $2
update_cronjob_trigger(){
    local func=${1:?}; shift
    local schedule=${1:?};
    echo_info "TEST: Updating CronJob trigger"
    kubeless trigger cronjob update ${func} --function ${func} --schedule "${schedule}"
}
# Check the trigger is listed with schedule $2, then wait until the function
# logs contain $3 (i.e. the cron job actually fired)
verify_cronjob_trigger(){
    local func=${1:?}; shift
    local schedule=${1:?}; shift
    local expected_log=${1:?}
    local -i cnt=${TEST_MAX_WAIT_SEC:?}
    kubeless trigger cronjob list | grep ${func} | grep "${schedule}"
    echo_info "Waiting for CronJob to be executed..."
until kubectl logs -l function=${func} | grep "$expected_log"; do ((cnt=cnt-1)) || return 1 sleep 1 done } delete_cronjob_trigger() { local func=${1:?}; shift kubeless trigger cronjob list |grep -w ${func} && kubeless trigger cronjob delete ${func} >& /dev/null || true } test_kubeless_autoscale() { local func=${1:?} exp_autoscale act_autoscale # Use some fixed values local val=10 num=3 echo_info "TEST: autoscale ${func}" kubeless autoscale create ${func} --value ${val:?} --min ${num:?} --max ${num:?} wait_for_autoscale ${func} kubeless autoscale list | fgrep -w ${func} act_autoscale=$(kubectl get horizontalpodautoscaler -ojsonpath='{range .items[*].spec}{@.scaleTargetRef.name}:{@.targetCPUUtilizationPercentage}:{@.minReplicas}:{@.maxReplicas}{end}') exp_autoscale="${func}:${val}:${num}:${num}" [[ ${act_autoscale} == ${exp_autoscale} ]] k8s_wait_for_pod_count ${num} -l function="${func}" kubeless autoscale delete ${func} } test_topic_deletion() { local topic=$RANDOM local topic_count=0 kubeless topic create $topic kubeless topic delete $topic topic_count=$(kubeless topic list | grep $topic | wc -l) if [ ${topic_count} -gt 0 ] ; then echo_info "Topic $topic still exists" exit 200 fi } sts_restart() { local num=1 kubectl delete pod kafka-0 -n kubeless kubectl delete pod zoo-0 -n kubeless k8s_wait_for_uniq_pod -l kubeless=zookeeper -n kubeless k8s_wait_for_uniq_pod -l kubeless=kafka -n kubeless wait_for_kubeless_kafka_server_ready } verify_clean_object() { local type=${1:?}; shift local name=${1:?}; shift echo_info "Checking if "${type}" exists for function "${name}"... " local -i cnt=${TEST_MAX_WAIT_SEC:?} until [[ ! 
$(kubectl get ${type} 2>&1 | grep ${name}) ]]; do ((cnt=cnt-1)) || return 1 sleep 1 echo_info "$(kubectl get ${type} 2>&1 | grep ${name})" done echo_info "${type}/${name} is gone" } # vim: sw=4 ts=4 et si ================================================ FILE: script/pull-or-build-image.sh ================================================ #!/bin/bash set -e TARGET=${1:?} function push() { local image=${1:?} if [[ -n "$DOCKER_USERNAME" && -n "$DOCKER_PASSWORD" ]]; then docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD" docker push $image fi } case "${TARGET}" in "function-controller") image=${CONTROLLER_IMAGE:?} docker pull $image || make $TARGET CONTROLLER_IMAGE=$image push $image ;; "function-image-builder") image=${FUNCTION_IMAGE_BUILDER:?} docker pull $image || make $TARGET FUNCTION_IMAGE_BUILDER=$image push $image ;; "default") echo "Unsupported target" exit 1 esac ================================================ FILE: script/release_utils.sh ================================================ #!/bin/bash set -e function commit_list { local tag=${1:?} local repo_domain=${2:?} local repo_name=${3:?} git fetch --tags local previous_tag=`curl -H "Authorization: token $ACCESS_TOKEN" -s https://api.github.com/repos/$repo_domain/$repo_name/tags | jq --raw-output '.[1].name'` local release_notes=`git log $previous_tag..$tag --oneline` local parsed_release_notes=$(echo "$release_notes" | sed -n -e 'H;${x;s/\n/\\n- /g;s/^\\n//;s/"/\\"/g;p;}') echo $parsed_release_notes } function get_release_notes { local tag=${1:?} local repo_domain=${2:?} local repo_name=${3:?} commits=`commit_list $tag $repo_domain $repo_name` notes=$(echo "\ This release includes the following commits and features:\\n\ $commits\\n\\n\ To install this latest version, use the manifest that is part of the release:\\n\ \\n\ **WITH RBAC ENABLED:**\\n\ \\n\ \`\`\`console\\n\ kubectl create ns kubeless\\n\ kubectl create -f https://github.com/kubeless/kubeless/releases/download/$tag/kubeless-$tag.yaml 
\\n\ \`\`\`\\n\ \\n\ **WITHOUT RBAC:**\\n\ \\n\ \`\`\`console\\n\ kubectl create ns kubeless\\n\ kubectl create -f https://github.com/kubeless/kubeless/releases/download/$tag/kubeless-non-rbac-$tag.yaml \\n\ \`\`\`\\n\ **OPENSHIFT:**\\n\ \\n\ \`\`\`console\\n\ oc create ns kubeless\\n\ oc create -f https://github.com/kubeless/kubeless/releases/download/$tag/kubeless-openshift-$tag.yaml \\n\ # Kafka\\n\ oc create -f https://github.com/kubeless/kubeless/releases/download/$tag/kafka-zookeeper-openshift-$tag.yaml \\n\ \`\`\`\\n\ ") echo "${notes}" } function get_release_body { local tag=${1:?} local repo_domain=${2:?} local repo_name=${3:?} local release_notes=$(get_release_notes $tag $repo_domain $repo_name) echo '{ "tag_name": "'$tag'", "target_commitish": "master", "name": "'$tag'", "body": "'$release_notes'", "draft": true, "prerelease": false }' } function update_release_tag { local tag=${1:?} local repo_domain=${2:?} local repo_name=${3:?} local release_id=$(curl -H "Authorization: token $ACCESS_TOKEN" -s https://api.github.com/repos/$repo_domain/$repo_name/releases | jq --raw-output '.[0].id') local body=$(get_release_body $tag $repo_domain $repo_name) local release=`curl -H "Authorization: token $ACCESS_TOKEN" -s --request PATCH --data $body https://api.github.com/repos/$repo_domain/$repo_name/releases/$release_id` echo $release } function release_tag { local tag=$1 local repo_domain=${2:?} local repo_name=${3:?} local body=$(get_release_body $tag $repo_domain $repo_name) local release=`curl -H "Authorization: token $ACCESS_TOKEN" -s --request POST --data "$body" https://api.github.com/repos/$repo_domain/$repo_name/releases` echo $release } function upload_asset { local repo_domain=${1:?} local repo_name=${2:?} local release_id=${3:?} local asset=${4:?} local filename=$(basename $asset) if [[ "$filename" == *".zip" ]]; then local content_type="application/zip" elif [[ "$filename" == *".yaml" ]]; then local content_type="text/yaml" fi curl -H "Authorization: 
token $ACCESS_TOKEN" \ -H "Content-Type: $content_type" \ --data-binary @"$asset" \ "https://uploads.github.com/repos/$repo_domain/$repo_name/releases/$release_id/assets?name=$filename" } ================================================ FILE: script/start-gke-env.sh ================================================ #!/bin/bash CLUSTER=${1:?} ZONE=${2:?} BRANCH=${3:?} ADMIN=${4:?} # Resolve latest version from a branch VERSION=$(gcloud container get-server-config --zone $ZONE --format='yaml(validMasterVersions)' 2> /dev/null | grep $BRANCH | awk '{print $2}' | head -n 1) function clean() { local resource=${1:?} kubectl get $resource | awk '{print $1}' | xargs kubectl delete $resource || true } if ! gcloud container clusters list; then echo "Unable to access gcloud project" exit 1 fi if gcloud container clusters list | grep -q $CLUSTER; then echo "GKE cluster already exits. Deleting resources" # Cluster already exists, make sure it is clean gcloud container clusters get-credentials $CLUSTER --zone $ZONE kubectl delete ns kubeless || true resources=( cronjobs jobs deployments horizontalpodautoscalers ) for res in "${resources[@]}"; do clean $res done echo "Removing clusterroles" >&9 kubectl delete clusterrole kubeless-controller-deployer || true kubectl delete clusterrole kafka-controller-deployer || true kubectl delete clusterrolebindings kubeless-controller-deployer || true kubectl delete clusterrolebindings kafka-controller-deployer || true echo "Removing customresourcecleanup.apiextensions.k8s.io finalizer from CRD's" >&9 kubectl patch crd/functions.kubeless.io -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl patch crd/cronjobtriggers.kubeless.io -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl patch crd/httptriggers.kubeless.io -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl patch crd/kafkatriggers.kubeless.io -p '{"metadata":{"finalizers":[]}}' --type=merge || true echo "Removing finalizers from CRD object's and 
deleting the CRD objects" >&9 functions=$(kubectl get functions -o name) for func in $functions; do kubectl patch $func -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl delete $func done cronjobtriggers=$(kubectl get cronjobtriggers -o name) for trigger in $cronjobtriggers; do kubectl patch $trigger -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl delete $trigger done httptriggers=$(kubectl get httptriggers -o name) for trigger in $httptriggers; do kubectl patch $trigger -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl delete $trigger done kafkatriggers=$(kubectl get kafkatriggers -o name) for trigger in $kafkatriggers; do kubectl patch $trigger -p '{"metadata":{"finalizers":[]}}' --type=merge || true kubectl delete $trigger done echo "Deleting CRD's" >&9 kubectl delete crd functions.kubeless.io || true kubectl delete crd cronjobtriggers.kubeless.io || true kubectl delete crd httptriggers.kubeless.io || true kubectl delete crd kafkatriggers.kubeless.io || true else echo "Creating cluster $CLUSTER in $ZONE (v$VERSION)" gcloud container clusters create --cluster-version=$VERSION --zone $ZONE $CLUSTER --num-nodes 5 --machine-type=n1-standard-2 # Wait for the cluster to respond cnt=20 until kubectl get pods; do ((cnt=cnt-1)) || (echo "Waited 20 seconds but cluster is not reachable" && return 1) sleep 1 done kubectl create clusterrolebinding kubeless-cluster-admin --clusterrole=cluster-admin --user=$ADMIN fi ================================================ FILE: script/start-test-environment.sh ================================================ #!/bin/bash set -e SCRIPT=$0 if [ -h $SCRIPT ]; then SCRIPT=`readlink $SCRIPT` fi ROOTDIR=`cd $(dirname $SCRIPT)/.. && pwd` COMMAND="${@:-bash}" if ! 
minikube status | grep -q "minikube: $"; then
    echo "Unable to start the test environment with an existing instance of minikube"
    echo "Delete the current profile executing 'minikube delete' or create a new one"
    echo "executing 'minikube profile new_profile'"
    exit 1
fi

minikube start --extra-config=apiserver.authorization-mode=RBAC --insecure-registry 0.0.0.0/0
eval $(minikube docker-env)
CONTEXT=$(kubectl config current-context)

# Both RBAC'd dind and minikube seem to be missing rules to make kube-dns work properly
# add some (granted) broad ones:
kubectl --context=${CONTEXT} get clusterrolebinding kube-dns-admin >& /dev/null || \
    kubectl --context=${CONTEXT} create clusterrolebinding kube-dns-admin --serviceaccount=kube-system:default --clusterrole=cluster-admin

# Run the dev-environment container with the repo and kube credentials mounted
docker run --privileged -it \
    -v $ROOTDIR:/go/src/github.com/kubeless/kubeless \
    -v $HOME/.kube:/root/.kube \
    -v $HOME/.minikube:$HOME/.minikube \
    -e TEST_CONTEXT=$(kubectl config current-context) \
    -e TEST_DEBUG=1 \
    kubeless/dev-environment:latest bash -c "$COMMAND"


================================================
FILE: script/upload_release_notes.sh
================================================
#!/bin/bash
set -e

REPO_NAME=kubeless
REPO_DOMAIN=kubeless

source $(dirname $0)/release_utils.sh

if [[ -z "$REPO_NAME" || -z "$REPO_DOMAIN" ]]; then
    echo "Github repository not specified" > /dev/stderr
    exit 1
fi

if [[ -z "$ACCESS_TOKEN" ]]; then
    echo "Unable to release: Github Token not specified" > /dev/stderr
    exit 1
fi

repo_check=`curl -H "Authorization: token $ACCESS_TOKEN" -s https://api.github.com/repos/$REPO_DOMAIN/$REPO_NAME`
if [[ $repo_check == *"Not Found"* ]]; then
    echo "Not found a Github repository for $REPO_DOMAIN/$REPO_NAME, it is not possible to publish it" > /dev/stderr
    exit 1
else
    # BUGFIX: update_release_tag's third argument is the repository NAME, but
    # the original passed $REPO_DOMAIN twice; it only worked because both
    # variables happen to be "kubeless". Pass $REPO_NAME explicitly.
    update_release_tag $1 $REPO_DOMAIN $REPO_NAME
fi


================================================
FILE: script/validate-git-marks
================================================
#!/usr/bin/env bash

# Copyright (c) 
2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

source "$(dirname "$BASH_SOURCE")/.validate"

# folders=$(find * -type d | egrep -v '^Godeps|bundles|.git')

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*' | grep -v '^vendor/' || true) )
unset IFS

badFiles=()
for f in "${files[@]}"; do
    # BUGFIX: the original used `if [ $(grep -r "^<<<<<<<" $f) ]` — with more
    # than one matching line the unquoted command substitution expands to
    # several words, `[` fails with "too many arguments", and the conflict
    # goes UNREPORTED. `grep -q` tests the exit status directly instead.
    if grep -q "^<<<<<<<" "$f"; then
        badFiles+=( "$f" )
        continue
    fi
    if grep -q "^>>>>>>>" "$f"; then
        badFiles+=( "$f" )
        continue
    fi
    if grep -q "^=======$" "$f"; then
        badFiles+=( "$f" )
        continue
    fi
    set -e
done

if [ ${#badFiles[@]} -eq 0 ]; then
    echo 'Congratulations! There is no conflict.'
else
    {
        echo "There is trace of conflict(s) in the following files :"
        for f in "${badFiles[@]}"; do
            echo " - $f"
        done
        echo
        echo 'Please fix the conflict(s) and commit the result.'
        echo
    } >&2
    false
fi


================================================
FILE: script/validate-gofmt
================================================
#!/bin/bash

# Copyright (c) 2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. source "$(dirname "$BASH_SOURCE")/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\|kubeless.tpl.go' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do # we use "git show" here to validate that what's committed is formatted if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then badFiles+=( "$f" ) fi done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! All Go source files are properly formatted.' else { echo "These files are not properly gofmt'd:" for f in "${badFiles[@]}"; do echo " - $f" done echo echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' echo } >&2 false fi ================================================ FILE: script/validate-lint ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
source "$(dirname "$BASH_SOURCE")/.validate" # We will eventually get to the point where packages should be the complete list # of subpackages, vendoring excluded, as given by: # IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\|^pkg/client/\|^pkg/apis/kubeless/v1beta1/zz_generated.deepcopy.go\|^integration\|kubeless.tpl.go' || true) ) unset IFS errors=() for f in "${files[@]}"; do # we use "git show" here to validate that what's committed passes go lint failedLint=$(golint "$f") if [ "$failedLint" ]; then errors+=( "$failedLint" ) fi done if [ ${#errors[@]} -eq 0 ]; then echo 'Congratulations! All Go source files have been linted.' else { echo "Errors from golint:" for err in "${errors[@]}"; do echo "$err" done echo echo 'Please fix the above errors. You can test via "golint" and commit the result.' echo } >&2 false fi ================================================ FILE: script/validate-test ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO: Simplify once `./...` ignores `vendor/` go test \ github.com/kubeless/kubeless/cmd/... \ github.com/kubeless/kubeless/pkg/... \ github.com/kubeless/kubeless/version/... 
================================================ FILE: script/validate-vet ================================================ #!/usr/bin/env bash # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. source "$(dirname "$BASH_SOURCE")/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\|kubeless.tpl.go' || true) ) unset IFS failed=0 for f in "${files[@]}"; do # we use "git show" here to validate that what's committed passes go tool vet if ! go vet "$f"; then failed=1 fi done exit $failed ================================================ FILE: tests/deployment-tests.bats ================================================ # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load ../script/libtest

# Sanity checks for the test environment itself (tools, RBAC mode), then the
# RBAC failure/redeploy sequence the integration suites depend on.

@test "Verify TEST_CONTEXT envvar" {
    : ${TEST_CONTEXT:?}
}

@test "Verify needed kubernetes tools installed" {
    verify_k8s_tools
}

@test "Verify k8s RBAC mode" {
    verify_rbac_mode
}

@test "Test simple function failure without RBAC rules" {
    test_must_fail_without_rbac_roles
}

@test "Redeploy with proper RBAC rules" {
    redeploy_with_rbac_roles
}


================================================
FILE: tests/integration-tests-cronjob.bats
================================================
#!/usr/bin/env bats

# Copyright (c) 2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load ../script/libtest @test "Create Cronjob Trigger" { deploy_function get-python verify_function get-python create_cronjob_trigger get-python '* * * * *' verify_cronjob_trigger get-python '* * * * *' '"GET / HTTP/1.1" 200' update_cronjob_trigger get-python '*/60 * * * *' verify_cronjob_trigger get-python '*/60 * * * *' '"GET / HTTP/1.1" 200' delete_cronjob_trigger get-python verify_clean_object cronjobtrigger get-python } @test "Test no-errors" { if kubectl logs -n kubeless -l kubeless=controller | grep "level=error"; then echo "Found errors in the controller logs" false fi } ================================================ FILE: tests/integration-tests-http.bats ================================================ #!/usr/bin/env bats # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load ../script/libtest @test "Wait for Ingress" { wait_for_ingress } @test "Create HTTP Trigger" { deploy_function get-python verify_function get-python create_http_trigger get-python "test.domain" verify_http_trigger get-python $(minikube ip) "hello.*world" "test.domain" update_http_trigger get-python "test.domain-updated" verify_http_trigger get-python $(minikube ip) "hello.*world" "test.domain-updated" delete_http_trigger get-python verify_clean_object httptrigger ing-get-python verify_clean_object ingress ing-get-python } @test "Create HTTP Trigger with a path" { deploy_function get-python verify_function get-python create_http_trigger get-python "test.domain" "get-python" verify_http_trigger get-python $(minikube ip) "hello.*world" "test.domain" "get-python" delete_http_trigger get-python verify_clean_object httptrigger ing-get-python verify_clean_object ingress ing-get-python } @test "Create HTTP Trigger with TLS private key and certificate" { deploy_function get-python verify_function get-python create_tls_secret_from_key_cert foo-secret create_http_trigger_with_tls_secret get-python "foo.bar.com" "get-python" "foo-secret" verify_https_trigger get-python $(minikube ip) "hello.*world" "foo.bar.com" "get-python" delete_http_trigger get-python verify_clean_object httptrigger ing-get-python verify_clean_object ingress ing-get-python } @test "Create HTTP Trigger with basic auth" { deploy_function get-python verify_function get-python create_basic_auth_secret "basic-auth" create_http_trigger get-python "test.domain" "get-python" "basic-auth" "nginx" verify_http_trigger_basic_auth get-python $(minikube ip) "hello.*world" "test.domain" "get-python" "foo:bar" delete_http_trigger get-python verify_clean_object httptrigger ing-get-python verify_clean_object ingress ing-get-python } @test "Test no-errors" { if kubectl logs -n kubeless -l kubeless=controller | grep "level=error"; then echo "Found errors in the controller logs" false fi } 
================================================ FILE: tests/integration-tests-kafka.bats ================================================ #!/usr/bin/env bats # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. load ../script/libtest # 'bats' lacks loop support, unroll-them-all -> @test "Wait for kafka" { deploy_kafka wait_for_kubeless_kafka_server_ready } @test "Test function: pubsub-python" { deploy_function pubsub-python verify_function pubsub-python kubeless_function_delete pubsub-python } @test "Test function: pubsub-python34" { deploy_function pubsub-python34 verify_function pubsub-python34 kubeless_function_delete pubsub-python34 } @test "Test 1:n association between Kafka trigger and functions" { deploy_function kafka-python-func1-topic-s3-python deploy_function kafka-python-func2-topic-s3-python deploy_kafka_trigger s3-python-kafka-trigger verify_function kafka-python-func1-topic-s3-python verify_function kafka-python-func2-topic-s3-python kubeless_function_delete kafka-python-func1-topic-s3-python kubeless_function_delete kafka-python-func2-topic-s3-python } @test "Test function: pubsub-nodejs" { deploy_function pubsub-nodejs verify_function pubsub-nodejs test_kubeless_function_update pubsub-nodejs kubeless_function_delete pubsub-nodejs } @test "Test function: pubsub-ruby" { deploy_function pubsub-ruby verify_function pubsub-ruby kubeless_function_delete pubsub-ruby } @test "Test function: pubsub-go" { deploy_function pubsub-go 
verify_function pubsub-go kubeless_function_delete pubsub-go } @test "Test topic list" { wait_for_kubeless_kafka_server_ready for topic in topic1 topic2; do kubeless topic create $topic _wait_for_kubeless_kafka_topic_ready $topic done kubeless topic list >$BATS_TMPDIR/kubeless-topic-list grep -qxF topic1 $BATS_TMPDIR/kubeless-topic-list grep -qxF topic2 $BATS_TMPDIR/kubeless-topic-list } @test "Test topic deletion" { test_topic_deletion } @test "Verify Kafka after restart (if context=='minikube')" { local topic=$RANDOM kubeless topic create $topic sts_restart kubeless topic list | grep $topic } # vim: ts=2 sw=2 si et syntax=sh ================================================ FILE: tests/integration-tests-kinesis.bats ================================================ #!/usr/bin/env bats # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load ../script/libtest # 'bats' lacks loop support, unroll-them-all -> @test "Deploy and wait for Kinesalite" { deploy_kinesis_trigger_controller wait_for_kubeless_kinesis_controller_ready deploy_kinesalite wait_for_kinesalite_pod } @test "Test function: stream-python-kinesis" { deploy_function python-kinesis verify_function python-kinesis kubeless_function_delete python-kinesis } @test "Test function: stream-multi-record-pubish-python-kinesis" { deploy_function python-kinesis-multi-record verify_function python-kinesis-multi-record kubeless_function_delete python-kinesis-multi-record } ================================================ FILE: tests/integration-tests-nats.bats ================================================ #!/usr/bin/env bats # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load ../script/libtest # 'bats' lacks loop support, unroll-them-all -> @test "Deploy and wait for NATS" { deploy_nats_operator wait_for_kubeless_nats_operator_ready deploy_nats_trigger_controller wait_for_kubeless_nats_controller_ready deploy_nats_cluster wait_for_kubeless_nats_cluster_ready expose_nats_service } @test "Test function: pubsub-python-nats" { deploy_function python-nats verify_function python-nats kubeless_function_delete python-nats } @test "Test 1:n association between NATS trigger and functions" { deploy_function nats-python-func1-topic-test deploy_function nats-python-func2-topic-test deploy_nats_trigger nats-python-trigger-topic-test verify_function nats-python-func1-topic-test verify_function nats-python-func2-topic-test kubeless_function_delete nats-python-func1-topic-test kubeless_function_delete nats-python-func2-topic-test } @test "Test 1:n association between function and NATS triggers" { deploy_function nats-python-func-multi-topic deploy_nats_trigger nats-python-trigger-topic1 deploy_nats_trigger nats-python-trigger-topic2 verify_function nats-python-func-multi-topic kubeless_function_delete nats-python-func-multi-topic } ================================================ FILE: tests/integration-tests-prebuilt.bats ================================================ #!/usr/bin/env bats # Copyright (c) 2016-2017 Bitnami # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load ../script/libtest

@test "Ensure build step" {
  # The controller config must have the build step enabled and TLS
  # verification against the function registry disabled (the local
  # registry below is served over plain HTTP).
  kubectl get -n kubeless configMap kubeless-config -o yaml | grep enable-build-step | grep true
  kubectl get -n kubeless configMap kubeless-config -o yaml | grep function-registry-tls-verify | grep false
  # Reuse the registry credentials secret if it already exists; otherwise
  # create it pointing at the registry exposed on the minikube VM.
  kubectl get secret kubeless-registry-credentials || kubectl create secret docker-registry kubeless-registry-credentials \
    --docker-server=http://$(minikube ip):5000/v2 \
    --docker-username="user" \
    --docker-password="password" \
    --docker-email="email"
}

@test "Deploy a function using the build system" {
  deploy_function get-python
  # Wait for the image build job to finish, then confirm the image
  # landed in the local registry catalog.
  wait_for_job get-python
  curl http://$(minikube ip):5000/v2/_catalog
  # Speed up pod start when the image is ready
  restart_function get-python
  verify_function get-python
  # The controller must have logged that it kicked off a build job, and
  # the deployment must reference the locally-built image.
  kubectl logs -n kubeless -l kubeless=controller -c kubeless-function-controller | grep "Started function build job"
  kubectl get deployment -o yaml get-python | grep image | grep $(minikube ip):5000
}

@test "Deploy a Golang function using the build system" {
  deploy_function get-go-deps
  wait_for_job get-go-deps
  # Speed up pod start when the image is ready
  restart_function get-go-deps
  verify_function get-go-deps
  # The deployment must reference the locally-built image.
  kubectl get deployment -o yaml get-go-deps | grep image | grep $(minikube ip):5000
}

@test "Test no-errors" {
  # Fail the suite if any error-level message appeared in the controller logs.
  if kubectl logs -n kubeless -l kubeless=controller | grep "level=error"; then
    echo "Found errors in the controller logs"
    false
  fi
}

================================================ FILE: tests/integration-tests.bats ================================================
#!/usr/bin/env bats

# Copyright (c) 2016-2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. load ../script/libtest # 'bats' lacks loop support, unroll-them-all -> @test "Deploy functions to evaluate" { deploy_function get-python deploy_function get-python-deps deploy_function get-python-deps-tar-gz deploy_function get-python-deps-tar-bz2 deploy_function get-python-deps-tar-xz deploy_function get-python-custom-port deploy_function timeout-nodejs deploy_function get-nodejs-multi deploy_function get-python-metadata deploy_function get-python-secrets deploy_function post-python deploy_function custom-get-python deploy_function get-python-url-deps deploy_function get-node-url-zip deploy_function get-node-url-tar-gz deploy_function get-node-url-tar-bz2 deploy_function get-node-url-tar-xz } @test "Test function: get-python" { verify_function get-python } @test "Test function: get-python-deps" { verify_function get-python-deps } @test "Test function: get-python-deps-tar-gz" { verify_function get-python-deps-tar-gz kubeless_function_delete get-python-deps-tar-gz } @test "Test function: get-python-deps-tar-bz2" { verify_function get-python-deps-tar-bz2 kubeless_function_delete get-python-deps-tar-bz2 } @test "Test function: get-python-deps-tar-xz" { verify_function get-python-deps-tar-xz kubeless_function_delete get-python-deps-tar-xz } @test "Test function: get-python-custom-port" { verify_function get-python-custom-port } @test "Test function update: get-python" { test_kubeless_function_update get-python } @test "Test function update: get-python-deps" { test_kubeless_function_update get-python-deps kubeless_function_delete get-python-deps } @test "Test function 
autoscale: get-python" { if kubectl api-versions | tr '\n' ' ' | grep -q -v "autoscaling/v2beta1"; then skip "Autoscale is only supported for Kubernetes >= 1.8" fi test_kubeless_autoscale get-python kubeless_function_delete get-python } @test "Test function: timeout-nodejs" { verify_function timeout-nodejs kubeless_function_delete timeout-nodejs } @test "Test function: get-nodejs-multi" { verify_function get-nodejs-multi kubeless_function_delete get-nodejs-multi } @test "Test custom runtime image" { verify_function custom-get-python test_kubeless_function_update custom-get-python kubeless_function_delete custom-get-python } @test "Test function: post-python" { verify_function post-python kubeless_function_delete post-python } @test "Test function: get-python-metadata" { verify_function get-python-metadata kubeless_function_delete get-python-metadata } @test "Test function: get-python-secrets" { verify_function get-python-secrets kubeless_function_delete get-python-secrets } @test "Test no-errors" { if kubectl logs -n kubeless -l kubeless=controller | grep "level=error"; then echo "Found errors in the controller logs" false fi } @test "Test function: get-python-url-deps" { verify_function get-python-url-deps kubeless_function_delete get-python-url-deps } @test "Test function: get-node-url-zip" { verify_function get-node-url-zip kubeless_function_delete get-node-url-zip } @test "Test function: get-node-url-tar-gz" { verify_function get-node-url-tar-gz kubeless_function_delete get-node-url-tar-gz } @test "Test function: get-node-url-tar-bz2" { verify_function get-node-url-tar-bz2 kubeless_function_delete get-node-url-tar-bz2 } @test "Test function: get-node-url-tar-xz" { verify_function get-node-url-tar-xz kubeless_function_delete get-node-url-tar-xz } # vim: ts=2 sw=2 si et syntax=sh